
particles/bvals_particle.c

00001 #include "../copyright.h"
00002 /*============================================================================*/
00003 /*! \file bvals_particle.c
00004  *  \brief Sets boundary conditions for particles. 
00005  *
00006  * PURPOSE: Sets boundary conditions for particles. The basic algorithm is
00007  *   similar to the MHD boundary conditions. To set the B.C., particles are
00008  *   always packed into the send buffer (for both MPI and non-MPI cases) and
00009  *   then unpacked with certain shifts in position or velocity. In the MPI
00010  *   case, two communications are needed to send and receive particles:
00011  *   first the number of particles to be sent/received, then the particle
00012  *   data themselves. The shearing box B.C. is first treated as a periodic
00013  *   B.C., then the particles in the ghost cells are shifted accordingly.
00014  *   Advection of particles when FARGO is turned on is also included.
00015  * 
00016  * CONTAINS PUBLIC FUNCTIONS:
00017  * - set_bvals_particle()
00018  * - advect_particles()
00019  * - set_bvals_particle_init()
00020  * - set_bvals_particle_fun()
00021  * - set_bvals_particle_destruct()
00022  *
00023  * PRIVATE FUNCTION PROTOTYPES:
00024  * - realloc_???()            - reallocate send/recv buffer
00025  * - update_particle_status() - reset particle status (either ghost or grid)
00026  * - reflect_???_particle()   - apply reflecting BCs at boundary ???
00027  * - outflow_particle()       - apply outflow BCs at boundary ???
00028  * - periodic_???_particle()  - apply periodic BCs at boundary ???
00029  * - packing_???_particle()   - pack particles at boundary ???
00030  * - shift_packed_particle()  - shift packed particle (v or x) by certain amount
00031  * - unpack_particle()        - unpack received particles
00032  * - shearingbox_???_particle() - shearing box BC at boundary ???
00033  * - packing_???_particle_shear() - pack particles for shearing box BC
00034  * - packing_particle_fargo() - pack particles for FARGO
00035  * - gridshift()              - calculate shift in grid in y for FARGO
00036  *
00037  * History:
00038  * - Created:   Emmanuel Jacquet        May 2008
00039  * - Rewritten: Xuening Bai             Feb. 2009                             */
00040 /*============================================================================*/
00041 #include <stdio.h>
00042 #include <stdlib.h>
00043 #include <math.h>
00044 #include "../defs.h"
00045 #include "../athena.h"
00046 #include "../prototypes.h"
00047 #include "prototypes.h"
00048 #include "particle.h"
00049 #include "../globals.h"
00050 
00051 
00052 #ifdef PARTICLES         /* endif at the end of the file */
00053 
00054 /* number of quantities packed per particle (particle structure size) */
00055 #ifdef MPI_PARALLEL
00056 #define NVAR_P 10
00057 #else
00058 #define NVAR_P 9
00059 #endif
00060 
00061 /* Send and receive buffers; their size is determined dynamically.
00062  * They are mainly used for MPI and for the shearing box.
00063  */
00064 static double *send_buf = NULL, *recv_buf = NULL;
00065 static long NBUF;        /* buffer size unit (in number of particles) */
00066 static long send_bufsize;/* size of the send buffer (in units of particles) */
00067 static long recv_bufsize;/* size of the recv buffer (in units of particles) */
00068 static int  nbc;         /* number of boundary layers for particle BC */
00069 
00070 /* processor indices in the computational domain */
00071 static int my_iproc, my_jproc, my_kproc;
00072 /* min and max coordinate limits of the computational domain */
00073 static Real x1min,x1max,x2min,x2max,x3min,x3max;
00074 static Real Lx1, Lx2, Lx3;/* domain size in x1, x2, x3 direction */
00075 static int NShuffle;      /* number of time steps for resorting particles */
00076 
00077 /* boundary condition function pointers, local to this file */
00078 static VBCFun_t apply_ix1 = NULL, apply_ox1 = NULL;
00079 static VBCFun_t apply_ix2 = NULL, apply_ox2 = NULL;
00080 static VBCFun_t apply_ix3 = NULL, apply_ox3 = NULL;
00081 
00082 /*==============================================================================
00083  * PRIVATE FUNCTION PROTOTYPES:
00084  *   realloc_???()            - reallocate send/recv buffer
00085  *   update_particle_status() - reset particle status (either ghost or grid)
00086  *   reflect_???_particle()   - apply reflecting BCs at boundary ???
00087  *   outflow_particle()       - apply outflow BCs at boundary ???
00088  *   periodic_???_particle()  - apply periodic BCs at boundary ???
00089  *   packing_???_particle()   - pack particles at boundary ???
00090  *   shift_packed_particle()  - shift packed particle (v or x) by certain amount
00091  *   unpack_particle()        - unpack received particles
00092  *   shearingbox_???_particle() - shearing box BC at boundary ???
00093  *   packing_???_particle_shear() - pack particles for shearing box BC
00094  *   packing_particle_fargo() - pack particles for FARGO
00095  *   gridshift()              - calculate shift in grid in y for FARGO
00096  *============================================================================*/
00097 
00098 static void realloc_sendbuf();
00099 static void realloc_recvbuf();
00100 
00101 static void update_particle_status(Grid *pG);
00102 
00103 static void reflect_ix1_particle(Grid *pG);
00104 static void reflect_ox1_particle(Grid *pG);
00105 static void reflect_ix2_particle(Grid *pG);
00106 static void reflect_ox2_particle(Grid *pG);
00107 static void reflect_ix3_particle(Grid *pG);
00108 static void reflect_ox3_particle(Grid *pG);
00109 
00110 static void outflow_particle(Grid *pG);
00111 
00112 static void periodic_ix1_particle(Grid *pG);
00113 static void periodic_ox1_particle(Grid *pG);
00114 static void periodic_ix2_particle(Grid *pG);
00115 static void periodic_ox2_particle(Grid *pG);
00116 static void periodic_ix3_particle(Grid *pG);
00117 static void periodic_ox3_particle(Grid *pG);
00118 
00119 static long packing_ix1_particle(Grid *pG, int nlayer);
00120 static long packing_ox1_particle(Grid *pG, int nlayer);
00121 static long packing_ix2_particle(Grid *pG, int nlayer);
00122 static long packing_ox2_particle(Grid *pG, int nlayer);
00123 static long packing_ix3_particle(Grid *pG, int nlayer);
00124 static long packing_ox3_particle(Grid *pG, int nlayer);
00125 static void packing_one_particle(Grain *cur, long n, short pos);
00126 static void shift_packed_particle(double *buf, long n, int index, double shift);
00127 static void unpack_particle(Grid *pG, double *buf, long n);
00128 
00129 #ifdef SHEARING_BOX
00130 static void shearingbox_ix1_particle(Grid *pG, Domain *pD, long numpar);
00131 static void shearingbox_ox1_particle(Grid *pG, Domain *pD, long numpar);
00132 
00133 static long packing_ix1_particle_shear(Grid *pG, int reg, long numpar);
00134 static long packing_ox1_particle_shear(Grid *pG, int reg, long numpar);
00135 
00136 #ifdef FARGO
00137 static long packing_particle_fargo(Grid *pG, Real yl, Real yu);
00138 static int gridshift(Real shift);
00139 #endif /* FARGO */
00140 
00141 #endif /* SHEARING_BOX */
00142 
00143 
00144 /*=========================== PUBLIC FUNCTIONS ===============================*/
00145 /*----------------------------------------------------------------------------*/
00146 /*! \fn void set_bvals_particle(Grid *pG, Domain *pD)
00147  *  \brief Calls appropriate functions to set particle BCs.
00148  *
00149  *   The function pointers (*apply_???) are set during initialization by
00150  *   set_bvals_particle_init() to be either a user-defined function, or one
00151  *   of the functions corresponding to reflecting, periodic, or outflow BCs.
00152  *   If the left- or right-Grid ID numbers are >= 0 (neighboring grids
00153  *   exist), then MPI calls are used.
00154  *
00155  *   The order for updating boundary conditions must always be x1-x2-x3,
00156  *   in order to fill the corner cells properly.
00157  *
00158  */
00159 
00160 void set_bvals_particle(Grid *pG, Domain *pD)
00161 {
00162 #ifdef MPI_PARALLEL
00163   int err;
00164   long cnt_send, cnt_recv;
00165   MPI_Request rq;
00166   MPI_Status stat;
00167 #endif /* MPI_PARALLEL */
00168 #ifdef SHEARING_BOX
00169   long numpar;  /* number of particles before applying B.C. */
00170   int vyind;    /* index for vy, 5 for 3D (x1,x2,x3)=(X,Y,Z),
00171                                  6 for 2D (x1,x2,x3)=(X,Z,Y) */
00172 #endif
00173 
00174 /*--- Step 1. ------------------------------------------------------------------
00175  * shuffle if necessary */
00176 
00177   /* shuffle every NShuffle steps */
00178   /* if NShuffle is not positive, don't shuffle */
00179   if ((NShuffle>0) && (fmod(pG->nstep, NShuffle)<0.1))
00180     shuffle(pG);
00181 
00182 /*--- Step 2. ------------------------------------------------------------------
00183  * Boundary Conditions in x1-direction */
00184 
00185   if (pG->Nx1 > 1){
00186 
00187 #ifdef SHEARING_BOX
00188   numpar = pG->nparticle;
00189   if (pG->Nx3 > 1) /* 3D shearing box (x1,x2,x3)=(X,Y,Z) */
00190     vyind = 5;
00191   else             /* 2D shearing box (x1,x2,x3)=(X,Z,Y) */
00192     vyind = 6;
00193 #endif
00194 
00195 #ifdef MPI_PARALLEL
00196 
00197 /* MPI blocks to both left and right */
00198     if (pG->rx1_id >= 0 && pG->lx1_id >= 0) {
00199 
00200     /*-------- sub-step 1: send to right and receive from left ------------*/
00201 
00202       /* Post a non-blocking receive for the data size from the left grid */
00203       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx1_id,
00204                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00205       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00206 
00207       /* packing particle on the right to send buffer */
00208       cnt_send = packing_ox1_particle(pG, nghost);
00209 
00210       /* send buffer size to the right grid */
00211       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx1_id,
00212                                 boundary_particle_tag, MPI_COMM_WORLD);
00213       if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00214 
00215       if (my_iproc == pD->NGrid_x1-1) {
00216         /* physical boundary on the right in periodic B.C. */
00217         shift_packed_particle(send_buf, cnt_send, 1, -Lx1);
00218 #ifdef SHEARING_BOX
00219 /* Note in 2D shearing sheet, (X,Y,Z)=(x1,x3,x2) */
00220 #ifndef FARGO
00221         /* velocity shift for shearing box */
00222         shift_packed_particle(send_buf, cnt_send, vyind, vshear);
00223 #endif
00224 #endif /* SHEARING_BOX */
00225       }
00226 
00227       /* receive buffer size from the left grid */
00228       err = MPI_Wait(&rq, &stat);
00229       if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00230 
00231       /* check space for the receive buffer */
00232       if ((cnt_recv+1) >= recv_bufsize)
00233         realloc_recvbuf();
00234 
00235       /* send send_buf to the right and obtain recv_buf from the left */
00236       if (cnt_recv > 0) {
00237         /* Post a non-blocking receive for the input data from the left grid */
00238         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx1_id,
00239                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00240         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00241       }
00242 
00243       if (cnt_send > 0) {
00244         /* send buffer to the right grid */
00245         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx1_id,
00246                                  boundary_particle_tag, MPI_COMM_WORLD);
00247         if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00248       }
00249 
00250       if (cnt_recv > 0) {
00251         /* receive buffer from the left grid */
00252         err = MPI_Wait(&rq, &stat);
00253         if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00254 
00255         /* unpack the received particle */
00256         unpack_particle(pG, recv_buf, cnt_recv);
00257       }
00258 
00259     /*-------- sub-step 2: send to left and receive from right --------*/
00260 
00261       /* Post a non-blocking receive for the data size from the right grid */
00262       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx1_id,
00263                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00264       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00265 
00266       /* packing particle on the left to send buffer */
00267       cnt_send = packing_ix1_particle(pG, nghost);
00268 
00269       /* send buffer size to the left grid */
00270       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx1_id,
00271                                 boundary_particle_tag, MPI_COMM_WORLD);
00272       if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00273 
00274       if (my_iproc == 0) {
00275         /* physical boundary on the left in periodic B.C. */
00276         shift_packed_particle(send_buf, cnt_send, 1, Lx1);
00277 #ifdef SHEARING_BOX
00278 /* Note in 2D shearing sheet, (X,Y,Z)=(x1,x3,x2) */
00279 #ifndef FARGO
00280         /* velocity shift for shearing box */
00281         shift_packed_particle(send_buf, cnt_send, vyind, -vshear);
00282 #endif
00283 #endif /* SHEARING_BOX */
00284       }
00285 
00286       /* receive buffer size from the right grid */
00287       err = MPI_Wait(&rq, &stat);
00288       if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00289 
00290       /* check space for the receive buffer */
00291       if ((cnt_recv+1) >= recv_bufsize)
00292         realloc_recvbuf();
00293 
00294       /* send send_buf to the left and obtain recv_buf from the right */
00295       if (cnt_recv > 0) {
00296         /* Post a non-blocking receive for the input data from the right grid */
00297         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx1_id,
00298                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00299         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00300       }
00301 
00302       if (cnt_send > 0) {
00303         /* send buffer to the left grid */
00304         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx1_id,
00305                                  boundary_particle_tag, MPI_COMM_WORLD);
00306         if(err) ath_error("[send_ox1_particle]: MPI_Send error = %d\n",err);
00307       }
00308 
00309       if (cnt_recv > 0) {
00310         /* receive buffer from the right grid */
00311         err = MPI_Wait(&rq, &stat);
00312         if(err) ath_error("[receive_ox1_particle]: MPI_Wait error = %d\n",err);
00313 
00314         /* unpack the received particle */
00315         unpack_particle(pG, recv_buf, cnt_recv);
00316       }
00317     }
00318 
00319 /* Physical boundary on left, MPI block on right */
00320     if ((pG->rx1_id >= 0) && (pG->lx1_id < 0)) {
00321 
00322     /*-------- sub-step 1: send to right  --------*/
00323 
00324       /* packing particle on the right to send buffer */
00325       cnt_send = packing_ox1_particle(pG, nghost);
00326 
00327       /* send buffer size to the right grid */
00328       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx1_id,
00329                                 boundary_particle_tag, MPI_COMM_WORLD);
00330       if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00331 
00332       if (cnt_send > 0) {
00333         /* send buffer to the right grid */
00334         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx1_id,
00335                                  boundary_particle_tag, MPI_COMM_WORLD);
00336         if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00337       }
00338 
00339     /*-------- sub-step 2: apply left boundary condition --------*/
00340       (*apply_ix1)(pG);
00341 
00342     /*-------- sub-step 3: receive from right  --------*/
00343 
00344       /* Post a non-blocking receive for the data size from the right grid */
00345       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx1_id,
00346                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00347       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00348 
00349      /* receive buffer size from the right grid */
00350       err = MPI_Wait(&rq, &stat);
00351       if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00352 
00353       /* check space for the receive buffer */
00354       if ((cnt_recv+1) >= recv_bufsize)
00355         realloc_recvbuf();
00356 
00357       if (cnt_recv > 0) {
00358         /* Post a non-blocking receive for the input data from the right grid */
00359         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx1_id,
00360                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00361         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00362 
00363         /* receive buffer from the right grid */
00364         err = MPI_Wait(&rq, &stat);
00365         if(err) ath_error("[receive_ox1_particle]: MPI_Wait error = %d\n",err);
00366 
00367         /* unpack the received particle */
00368         unpack_particle(pG, recv_buf, cnt_recv);
00369       }
00370     }
00371 
00372 /* MPI block on left, Physical boundary on right */
00373     if ((pG->lx1_id >= 0) && (pG->rx1_id < 0)) {
00374 
00375     /*-------- sub-step 1: receive from left --------*/
00376 
00377       /* Post a non-blocking receive for the data size from the left grid */
00378       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx1_id,
00379                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00380       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00381 
00382       /* receive buffer size from the left grid */
00383       err = MPI_Wait(&rq, &stat);
00384       if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00385 
00386       /* check space for the receive buffer */
00387       if ((cnt_recv+1) >= recv_bufsize)
00388         realloc_recvbuf();
00389 
00390       if (cnt_recv > 0) {
00391         /* Post a non-blocking receive for the input data from the left grid */
00392         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx1_id,
00393                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00394         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00395       }
00396 
00397     /*-------- sub-step 2: apply right boundary condition --------*/
00398       (*apply_ox1)(pG);
00399 
00400     /*-------- sub-step 3: send to left --------*/
00401 
00402       /* packing particle on the left to send buffer */
00403       cnt_send = packing_ix1_particle(pG, nghost);
00404 
00405       /* send buffer size to the left grid */
00406       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx1_id,
00407                                 boundary_particle_tag, MPI_COMM_WORLD);
00408       if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00409 
00410       if (cnt_send > 0) {
00411         /* send buffer to the left grid */
00412         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx1_id,
00413                                  boundary_particle_tag, MPI_COMM_WORLD);
00414         if(err) ath_error("[send_ox1_particle]: MPI_Send error = %d\n",err);
00415       }
00416 
00417     /*-------- sub-step 1: receive from left (cont) --------*/
00418 
00419       if (cnt_recv > 0) {
00420         /* receive buffer from the left grid */
00421         err = MPI_Wait(&rq, &stat);
00422         if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);
00423 
00424         /* unpack the received particle */
00425         unpack_particle(pG, recv_buf, cnt_recv);
00426       }
00427     }
00428 #endif /* MPI_PARALLEL */
00429 
00430 /* Physical boundaries on both left and right */
00431     if (pG->rx1_id < 0 && pG->lx1_id < 0) {
00432       (*apply_ix1)(pG);
00433       (*apply_ox1)(pG);
00434     }
00435 
00436 #ifdef SHEARING_BOX
00437     if (pG->Nx3>1) {
00438     /* For 3D shearing box boundary conditions */
00439       if (my_iproc == 0) /* inner boundary */
00440         shearingbox_ix1_particle(pG, pD, numpar);
00441 
00442       if (my_iproc == (pD->NGrid_x1-1)) /* outer boundary */
00443         shearingbox_ox1_particle(pG, pD, numpar);
00444     }
00445 #endif /* SHEARING_BOX */
00446 
00447   }
00448 
00449 /*--- Step 3. ------------------------------------------------------------------
00450  * Boundary Conditions in x2-direction */
00451 
00452   if (pG->Nx2 > 1) {
00453 
00454 #ifdef MPI_PARALLEL
00455 
00456 /* MPI blocks to both left and right */
00457     if (pG->rx2_id >= 0 && pG->lx2_id >= 0) {
00458 
00459     /*-------- sub-step 1: send to right and receive from left --------*/
00460 
00461       /* Post a non-blocking receive for the data size from the left grid */
00462       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx2_id,
00463                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00464       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00465 
00466       /* packing particle on the right to send buffer */
00467       cnt_send = packing_ox2_particle(pG, nghost);
00468 
00469       /* send buffer size to the right grid */
00470       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx2_id,
00471                                 boundary_particle_tag, MPI_COMM_WORLD);
00472       if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);
00473 
00474       /* physical boundary on the right in periodic B.C. */
00475       if (my_jproc == pD->NGrid_x2-1)
00476         shift_packed_particle(send_buf, cnt_send, 2, -Lx2);
00477 
00478       /* receive buffer size from the left grid */
00479       err = MPI_Wait(&rq, &stat);
00480       if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00481 
00482       /* check space for the receive buffer */
00483       if ((cnt_recv+1) >= recv_bufsize)
00484         realloc_recvbuf();
00485 
00486       /* send send_buf to the right and obtain recv_buf from the left */
00487       if (cnt_recv > 0) {
00488         /* Post a non-blocking receive for the input data from the left grid */
00489         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx2_id,
00490                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00491         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00492       }
00493 
00494       if (cnt_send > 0) {
00495         /* send buffer to the right grid */
00496         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx2_id,
00497                                  boundary_particle_tag, MPI_COMM_WORLD);
00498         if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);
00499       }
00500 
00501       if (cnt_recv > 0) {
00502         /* receive buffer from the left grid */
00503         err = MPI_Wait(&rq, &stat);
00504         if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00505 
00506         /* unpack the received particle */
00507         unpack_particle(pG, recv_buf, cnt_recv);
00508       }
00509 
00510     /*-------- sub-step 2: send to left and receive from right --------*/
00511 
00512       /* Post a non-blocking receive for the data size from the right grid */
00513       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx2_id,
00514                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00515       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00516 
00517       /* packing particle on the left to send buffer */
00518       cnt_send = packing_ix2_particle(pG, nghost);
00519 
00520       /* send buffer size to the left grid */
00521       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx2_id,
00522                                 boundary_particle_tag, MPI_COMM_WORLD);
00523       if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);
00524 
00525       if (my_jproc == 0) /* physical boundary on the left in periodic B.C. */
00526         shift_packed_particle(send_buf, cnt_send, 2, Lx2);
00527 
00528       /* receive buffer size from the right grid */
00529       err = MPI_Wait(&rq, &stat);
00530       if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00531 
00532       /* check space for the receive buffer */
00533       if ((cnt_recv+1) >= recv_bufsize)
00534         realloc_recvbuf();
00535 
00536       /* send send_buf to the left and obtain recv_buf from the right */
00537       if (cnt_recv > 0) {
00538         /* Post a non-blocking receive for the input data from the right grid */
00539         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx2_id,
00540                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00541         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00542       }
00543 
00544       if (cnt_send > 0) {
00545         /* send buffer to the left grid */
00546         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx2_id,
00547                                  boundary_particle_tag, MPI_COMM_WORLD);
00548         if(err) ath_error("[send_ox2_particle]: MPI_Send error = %d\n",err);
00549       }
00550 
00551       if (cnt_recv > 0) {
00552         /* receive buffer from the right grid */
00553         err = MPI_Wait(&rq, &stat);
00554         if(err) ath_error("[receive_ox2_particle]: MPI_Wait error = %d\n",err);
00555 
00556         /* unpack the received particle */
00557         unpack_particle(pG, recv_buf, cnt_recv);
00558       }
00559     }
00560 
00561 /* Physical boundary on left, MPI block on right */
00562     if (pG->rx2_id >= 0 && pG->lx2_id < 0) {
00563 
00564     /*-------- sub-step 1: send to right --------*/
00565 
00566       /* packing particle on the right to send buffer */
00567       cnt_send = packing_ox2_particle(pG, nghost);
00568 
00569       /* send buffer size to the right grid */
00570       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx2_id,
00571                                 boundary_particle_tag, MPI_COMM_WORLD);
00572       if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00573 
00574       /* send buffer to the right grid */
00575       if (cnt_send > 0) {
00576         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx2_id,
00577                                  boundary_particle_tag, MPI_COMM_WORLD);
00578         if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
00579       }
00580 
00581     /*-------- sub-step 2: apply left boundary condition --------*/
00582 
00583       (*apply_ix2)(pG);
00584 
00585     /*-------- sub-step 3: receive from right --------*/
00586 
00587       /* Post a non-blocking receive for the data size from the right grid */
00588       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx2_id,
00589                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00590       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00591 
00592       /* receive buffer size from the right grid */
00593       err = MPI_Wait(&rq, &stat);
00594       if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00595 
00596       /* check space for the receive buffer */
00597       if ((cnt_recv+1) >= recv_bufsize)
00598         realloc_recvbuf();
00599 
00600       if (cnt_recv > 0) {
00601         /* Post a non-blocking receive for the input data from the right grid */
00602         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx2_id,
00603                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00604         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00605 
00606         /* receive buffer from the right grid */
00607         err = MPI_Wait(&rq, &stat);
00608         if(err) ath_error("[receive_ox2_particle]: MPI_Wait error = %d\n",err);
00609 
00610         /* unpack the received particle */
00611         unpack_particle(pG, recv_buf, cnt_recv);
00612       }
00613     }
00614 
00615 /* MPI block on left, Physical boundary on right */
00616     if (pG->rx2_id < 0 && pG->lx2_id >= 0) {
00617 
00618     /*-------- sub-step 1: receive from left --------*/
00619 
00620       /* Post a non-blocking receive for the data size from the left grid */
00621       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx2_id,
00622                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00623       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00624 
00625       /* receive buffer size from the left grid */
00626       err = MPI_Wait(&rq, &stat);
00627       if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00628 
00629       /* check space for the receive buffer */
00630       if ((cnt_recv+1) >= recv_bufsize)
00631         realloc_recvbuf();
00632       if (cnt_recv > 0) {
00633         /* Post a non-blocking receive for the input data from the left grid */
00634         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx2_id,
00635                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00636         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00637       }
00638 
00639     /*-------- sub-step 2: apply right boundary condition --------*/
00640 
00641       (*apply_ox2)(pG);
00642 
00643     /*-------- sub-step 3: send to left --------*/
00644 
00645       /* packing particle on the left to send buffer */
00646       cnt_send = packing_ix2_particle(pG, nghost);
00647 
00648       /* send buffer size to the left grid */
00649       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx2_id,
00650                                 boundary_particle_tag, MPI_COMM_WORLD);
00651       if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);
00652 
00653       /* send buffer to the left grid */
00654       if (cnt_send > 0) {
00655         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx2_id,
00656                                  boundary_particle_tag, MPI_COMM_WORLD);
00657         if(err) ath_error("[send_ox2_particle]: MPI_Send error = %d\n",err);
00658       }
00659 
00660     /*-------- sub-step 1: receive from left (cont) --------*/
00661 
00662       if (cnt_recv > 0) {
00663         /* receive buffer from the left grid */
00664         err = MPI_Wait(&rq, &stat);
00665         if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);
00666 
00667         /* unpack the received particle */
00668         unpack_particle(pG, recv_buf, cnt_recv);
00669       }
00670     }
00671 #endif /* MPI_PARALLEL */
00672 
00673 /* Physical boundaries on both left and right */
00674     if (pG->rx2_id < 0 && pG->lx2_id < 0) {
00675       (*apply_ix2)(pG);
00676       (*apply_ox2)(pG);
00677     }
00678   }
00679 
00680 /*--- Step 4. ------------------------------------------------------------------
00681  * Boundary Conditions in x3-direction */
00682 
00683   if (pG->Nx3 > 1){
00684 
00685 #ifdef MPI_PARALLEL
00686 
00687 /* MPI blocks to both left and right */
00688     if (pG->rx3_id >= 0 && pG->lx3_id >= 0) {
00689 
00690     /*-------- sub-step 1: send to right and receive from left --------*/
00691 
00692       /* Post a non-blocking receive for the data size from the left grid */
00693       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx3_id,
00694                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00695       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00696 
00697       /* packing particle on the right to send buffer */
00698       cnt_send = packing_ox3_particle(pG, nghost);
00699 
00700       /* send buffer size to the right grid */
00701       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx3_id,
00702                                 boundary_particle_tag, MPI_COMM_WORLD);
00703       if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00704 
00705       /* physical boundary on the right in periodic B.C. */
00706       if (my_kproc == pD->NGrid_x3-1) 
00707         shift_packed_particle(send_buf, cnt_send, 3, -Lx3);
00708 
00709       /* receive buffer size from the left grid */
00710       err = MPI_Wait(&rq, &stat);
00711       if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00712 
00713       /* check space for the receive buffer */
00714       if ((cnt_recv+1) >= recv_bufsize)
00715         realloc_recvbuf();
00716 
00717       /* send send_buf to the right and obtain recv_buf from the left */
00718       if (cnt_recv > 0) {
00719         /* Post a non-blocking receive for the input data from the left grid */
00720         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx3_id,
00721                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00722         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00723       }
00724 
00725       if (cnt_send > 0) {
00726         /* send buffer to the right grid */
00727         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx3_id,
00728                                  boundary_particle_tag, MPI_COMM_WORLD);
00729         if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00730       }
00731 
00732       if (cnt_recv > 0) {
00733         /* receive buffer from the left grid */
00734         err = MPI_Wait(&rq, &stat);
00735         if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00736 
00737         /* unpack the received particle */
00738         unpack_particle(pG, recv_buf, cnt_recv);
00739       }
00740 
00741     /*-------- sub-step 2: send to left and receive from right --------*/
00742 
00743       /* Post a non-blocking receive for the data size from the right grid */
00744       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx3_id,
00745                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00746       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00747 
00748       /* packing particle on the left to send buffer */
00749       cnt_send = packing_ix3_particle(pG, nghost);
00750 
00751       /* send buffer size to the left grid */
00752       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx3_id,
00753                                 boundary_particle_tag, MPI_COMM_WORLD);
00754       if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00755 
00756       if (my_kproc == 0) /* physical boundary on the left in periodic B.C. */
00757         shift_packed_particle(send_buf, cnt_send, 3, Lx3);
00758 
00759       /* receive buffer size from the right grid */
00760       err = MPI_Wait(&rq, &stat);
00761       if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00762 
00763       /* check space for the receive buffer */
00764       if ((cnt_recv+1) >= recv_bufsize)
00765         realloc_recvbuf();
00766 
00767       /* send send_buf to the left and obtain recv_buf from the right */
00768       if (cnt_recv > 0) {
00769         /* Post a non-blocking receive for the input data from the right grid */
00770         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx3_id,
00771                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00772         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00773       }
00774 
00775       if (cnt_send > 0) {
00776         /* send buffer to the left grid */
00777         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx3_id,
00778                                  boundary_particle_tag, MPI_COMM_WORLD);
00779         if(err) ath_error("[send_ox3_particle]: MPI_Send error = %d\n",err);
00780       }
00781 
00782       if (cnt_recv > 0) {
00783         /* receive buffer from the right grid */
00784         err = MPI_Wait(&rq, &stat);
00785         if(err) ath_error("[receive_ox3_particle]: MPI_Wait error = %d\n",err);
00786 
00787         /* unpack the received particle */
00788         unpack_particle(pG, recv_buf, cnt_recv);
00789       }
00790     }
00791 
00792 /* Physical boundary on left, MPI block on right */
00793     if (pG->rx3_id >= 0 && pG->lx3_id < 0) {
00794 
00795     /*-------- sub-step 1: send to right --------*/
00796 
00797       /* packing particle on the right to send buffer */
00798       cnt_send = packing_ox3_particle(pG, nghost);
00799 
00800       /* send buffer size to the right grid */
00801       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx3_id,
00802                                 boundary_particle_tag, MPI_COMM_WORLD);
00803       if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00804 
00805       /* send buffer to the right grid */
00806       if (cnt_send > 0) {
00807         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx3_id,
00808                                  boundary_particle_tag, MPI_COMM_WORLD);
00809         if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00810       }
00811 
00812     /*-------- sub-step 2: apply left boundary condition --------*/
00813 
00814       (*apply_ix3)(pG);
00815 
00816     /*-------- sub-step 3: receive from right --------*/
00817 
00818       /* Post a non-blocking receive for the data size from the right grid */
00819       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx3_id,
00820                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00821       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00822       /* receive buffer size from the right grid */
00823       err = MPI_Wait(&rq, &stat);
00824       if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00825 
00826       /* check space for the receive buffer */
00827       if ((cnt_recv+1) >= recv_bufsize)
00828         realloc_recvbuf();
00829 
00830       if (cnt_recv > 0) {
00831         /* Post a non-blocking receive for the input data from the right grid */
00832         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx3_id,
00833                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00834         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00835         /* receive buffer from the right grid */
00836         err = MPI_Wait(&rq, &stat);
00837         if(err) ath_error("[receive_ox3_particle]: MPI_Wait error = %d\n",err);
00838 
00839         /* unpack the received particle */
00840         unpack_particle(pG, recv_buf, cnt_recv);
00841       }
00842     }
00843 
00844 /* MPI block on left, Physical boundary on right */
00845     if (pG->rx3_id < 0 && pG->lx3_id >= 0) {
00846 
00847     /*-------- sub-step 1: receive from left --------*/
00848 
00849       /* Post a non-blocking receive for the data size from the left grid */
00850       err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx3_id,
00851                                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
00852       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00853 
00854       /* receive buffer size from the left grid */
00855       err = MPI_Wait(&rq, &stat);
00856       if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00857 
00858       /* check space for the receive buffer */
00859       if ((cnt_recv+1) >= recv_bufsize)
00860         realloc_recvbuf();
00861 
00862       if (cnt_recv > 0) {
00863         /* Post a non-blocking receive for the input data from the left grid */
00864         err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx3_id,
00865                                   boundary_particle_tag, MPI_COMM_WORLD, &rq);
00866         if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00867       }
00868 
00869     /*-------- sub-step 2: apply right boundary condition --------*/
00870 
00871       (*apply_ox3)(pG);
00872 
00873     /*-------- sub-step 3: send to left --------*/
00874 
00875       /* packing particle on the left to send buffer */
00876       cnt_send = packing_ix3_particle(pG, nghost);
00877 
00878       /* send buffer size to the left grid */
00879       err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx3_id,
00880                                 boundary_particle_tag, MPI_COMM_WORLD);
00881       if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
00882 
00883       if (cnt_send > 0) {
00884         /* send buffer to the left grid */
00885         err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx3_id,
00886                                  boundary_particle_tag, MPI_COMM_WORLD);
00887         if(err) ath_error("[send_ox3_particle]: MPI_Send error = %d\n",err);
00888       }
00889 
00890     /*-------- sub-step 1: receive from left (cont) --------*/
00891 
00892       if (cnt_recv > 0) {
00893         /* receive buffer from the left grid */
00894         err = MPI_Wait(&rq, &stat);
00895         if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);
00896 
00897         /* unpack the received particle */
00898         unpack_particle(pG, recv_buf, cnt_recv);
00899       }
00900     }
00901 #endif /* MPI_PARALLEL */
00902 
00903 /* Physical boundaries on both left and right */
00904     if (pG->rx3_id < 0 && pG->lx3_id < 0) {
00905       (*apply_ix3)(pG);
00906       (*apply_ox3)(pG);
00907     }
00908   }
00909 
00910 /*--- Step 5. ------------------------------------------------------------------
00911  * Update the status of the crossing particles */
00912   update_particle_status(pG);
00913 
00914   return;
00915 }
00916 
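/* Illustrative sketch of the count-then-data handshake used repeatedly in
 * set_bvals_particle() above: the particle count is exchanged first, and the
 * packed particle data follow only if the counts are non-zero. The helper
 * name exchange_particles_sketch() and its argument list are hypothetical
 * (for illustration only); the real code packs with packing_???_particle(),
 * unpacks with unpack_particle(), and uses the Grid's lx?/rx? neighbor ids. */
#ifdef MPI_PARALLEL
static void exchange_particles_sketch(int send_to, int recv_from,
                                      double *sendbuf, long nsend,
                                      double *recvbuf, long *nrecv)
{
  int err;
  MPI_Request rq;
  MPI_Status stat;

  /* phase 1: exchange the particle counts */
  err = MPI_Irecv(nrecv, 1, MPI_LONG, recv_from,
                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
  if(err) ath_error("[exchange_particles_sketch]: MPI_Irecv error = %d\n",err);
  err = MPI_Send(&nsend, 1, MPI_LONG, send_to,
                 boundary_particle_tag, MPI_COMM_WORLD);
  if(err) ath_error("[exchange_particles_sketch]: MPI_Send error = %d\n",err);
  err = MPI_Wait(&rq, &stat);
  if(err) ath_error("[exchange_particles_sketch]: MPI_Wait error = %d\n",err);

  /* (the real code reallocates recv_buf here if *nrecv exceeds recv_bufsize) */

  /* phase 2: exchange the packed data, NVAR_P doubles per particle */
  if (*nrecv > 0) {
    err = MPI_Irecv(recvbuf, (*nrecv)*NVAR_P, MPI_DOUBLE, recv_from,
                    boundary_particle_tag, MPI_COMM_WORLD, &rq);
    if(err) ath_error("[exchange_particles_sketch]: MPI_Irecv error = %d\n",err);
  }
  if (nsend > 0) {
    err = MPI_Send(sendbuf, nsend*NVAR_P, MPI_DOUBLE, send_to,
                   boundary_particle_tag, MPI_COMM_WORLD);
    if(err) ath_error("[exchange_particles_sketch]: MPI_Send error = %d\n",err);
  }
  if (*nrecv > 0) {
    err = MPI_Wait(&rq, &stat);
    if(err) ath_error("[exchange_particles_sketch]: MPI_Wait error = %d\n",err);
  }
}
#endif /* MPI_PARALLEL */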
00917 
00918 #ifdef FARGO
00919 /*----------------------------------------------------------------------------*/
00920 /*! \fn void advect_particles(Grid *pG, Domain *pD)
00921  *  \brief Advect particles by qshear*Omega_0*x1*dt for the FARGO algorithm. */
00922 void advect_particles(Grid *pG, Domain *pD)
00923 {
00924   Grain *cur;
00925   long p;
00926   Real x1l, x1u;
00927 #ifdef MPI_PARALLEL
00928   long cnt_recv, n;
00929   int ishl, ishu, i;
00930   int inds, indr, ids, idr;
00931   Real x2len, yl, yu;
00932   int err;
00933   MPI_Request rq;
00934   MPI_Status stat;
00935 #endif /* MPI_PARALLEL */
00936 
00937   /* lower and upper bound of the grid in x1 direction */
00938   x1l = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
00939   x1u = pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1;
00940 
00941   /* shift the particles */
00942   for (p=0; p<pG->nparticle; p++) {
00943     cur = &(pG->particle[p]);
00944     cur->x2 = x2min + fmod(cur->x2 + pG->parsub[p].shift - x2min + Lx2, Lx2);
00945   }
00946 
00947 #ifdef MPI_PARALLEL
00948   /* calculate the farthest grid that advection can reach */
00949   x2len = pG->dx2*pG->Nx2;
00950   ishl = gridshift((qshear*Omega_0*(x1l-pG->dx1)*pG->dt - pG->dx2)/x2len);
00951   ishu = gridshift((qshear*Omega_0*(x1u+pG->dx1)*pG->dt + pG->dx2)/x2len);
00952 
00953   /* loop over all the possible destination grids */
00954   for (i=ishl; i<=ishu; i++)
00955   if (i != 0) { /* avoid moving particles to the same grid */
00956 
00957     /* find the processor id to send/receive data */
00958     inds = my_jproc + i;
00959     if (inds < 0) inds += pD->NGrid_x2;
00960     if (inds > (pD->NGrid_x2-1)) inds -= pD->NGrid_x2;
00961     ids = pD->GridArray[my_kproc][inds][my_iproc].id;   /* send to */
00962 
00963     indr = my_jproc - i;
00964     if (indr < 0) indr += pD->NGrid_x2;
00965     if (indr > (pD->NGrid_x2-1)) indr -= pD->NGrid_x2;
00966     idr = pD->GridArray[my_kproc][indr][my_iproc].id;   /* receive from */
00967 
00968     /* Post a non-blocking receive for the data size */
00969     err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, idr, boundary_particle_tag,
00970                                                  MPI_COMM_WORLD, &rq);
00971     if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00972 
00973     /* packing particles */
00974     yl = x2min + inds*x2len;    yu = x2min + (inds+1)*x2len;
00975     n = packing_particle_fargo(pG, yl, yu);
00976 
00977     /* send buffer size */
00978     err = MPI_Send(&n, 1, MPI_LONG, ids, boundary_particle_tag, MPI_COMM_WORLD);
00979     if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
00980 
00981     /* receive buffer size */
00982     err = MPI_Wait(&rq, &stat);
00983     if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
00984 
00985     /* check space for the receive buffer */
00986     if ((cnt_recv+1) >= recv_bufsize)
00987       realloc_recvbuf();
00988 
00989     /* Post a non-blocking receive for data */
00990     if (cnt_recv > 0) {
00991       err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, idr,
00992                                 boundary_particle_tag, MPI_COMM_WORLD, &rq);
00993       if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
00994     }
00995 
00996     /* send buffer */
00997     if (n > 0) {
00998       err = MPI_Send(send_buf, n*NVAR_P, MPI_DOUBLE, ids,
00999                                boundary_particle_tag, MPI_COMM_WORLD);
01000       if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
01001     }
01002 
01003     /* receive buffer */
01004     if (cnt_recv > 0) {
01005       err = MPI_Wait(&rq, &stat);
01006       if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
01007 
01008       /* unpack the received particle */
01009       unpack_particle(pG, recv_buf, cnt_recv);
01010     }
01011   }
01012 #endif /* MPI_PARALLEL */
01013 
01014   return;
01015 }
01016 #endif /* FARGO */
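/* Illustrative sketch of the azimuthal wrap used in advect_particles() above:
 * the x2 (y) coordinate is shifted by the FARGO advection length and folded
 * back into [x2min, x2min+Lx2). The helper name wrap_x2_sketch() is
 * hypothetical; in the real code the per-particle shift is precomputed and
 * stored in pG->parsub[p].shift. Assumes the shift is no more negative than
 * -Lx2, as the fmod form above does. */
static Real wrap_x2_sketch(Real x2, Real dy)
{
  /* add Lx2 before taking fmod so the argument stays non-negative */
  return x2min + fmod(x2 + dy - x2min + Lx2, Lx2);
}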
01017 
01018 /*----------------------------------------------------------------------------*/
01019 /*! \fn void set_bvals_particle_init(Grid *pG, Domain *pD)
01020  *  \brief Sets function pointers for physical boundaries during
01021  *   initialization, and allocates memory for the particle send/recv buffers
01022  */
01023 void set_bvals_particle_init(Grid *pG, Domain *pD)
01024 {
01025   int ibc_x1, obc_x1; /* x1 inner and outer boundary condition flag */
01026   int ibc_x2, obc_x2; /* x2 inner and outer boundary condition flag */
01027   int ibc_x3, obc_x3; /* x3 inner and outer boundary condition flag */
01028 #ifdef MPI_PARALLEL
01029   int ib,jb,kb;
01030   int my_id = pG->my_id;
01031 #endif /* MPI_PARALLEL */
01032 
01033 /* initialize buffers */
01034   NBUF = (long)(0.15*pG->arrsize);
01035 
01036   send_bufsize = NBUF;
01037   recv_bufsize = NBUF;
01038   send_buf = (double*)calloc_1d_array(NVAR_P*send_bufsize, sizeof(double));
01039   recv_buf = (double*)calloc_1d_array(NVAR_P*recv_bufsize, sizeof(double));
01040 
01041 /* number of boundary layers to pack the particles */
01042 #ifdef FEEDBACK
01043   nbc = 4;  /* need 4 layers of ghost particles for feedback predictor */
01044 #else
01045   nbc = 1;  /* leave one layer for output purposes */
01046 #endif
01047 
01048 /* calculate the size of the computational domain and the shear velocity */
01049   x1min = par_getd("grid","x1min");
01050   x1max = par_getd("grid","x1max");
01051   Lx1 = x1max - x1min;
01052 
01053   x2min = par_getd("grid","x2min");
01054   x2max = par_getd("grid","x2max");
01055   Lx2 = x2max - x2min;
01056 
01057   x3min = par_getd("grid","x3min");
01058   x3max = par_getd("grid","x3max");
01059   Lx3 = x3max - x3min;
01060 
01061   get_myGridIndex(pD, pG->my_id, &my_iproc, &my_jproc, &my_kproc);
01062 
01063   /* get the number of time steps for shuffle */
01064   NShuffle = par_geti_def("particle","nshuf",0);/* by default, do not shuffle */
01065 
01066 #ifdef SHEARING_BOX
01067   /* shear velocity between inner and outer x1 boundaries */
01068   vshear = qshear * Omega_0 * Lx1;
01069 #endif /* SHEARING_BOX */
01070 
01071 /* Set function pointers for physical boundaries in x1-direction */
01072 
01073   if(pG->Nx1 > 1) {
01074     if(apply_ix1 == NULL){
01075 
01076       ibc_x1 = par_geti("grid","ibc_x1");
01077       switch(ibc_x1){
01078 
01079       case 1: /* Reflecting */
01080         apply_ix1 = reflect_ix1_particle;
01081         break;
01082       case 5: /* Reflecting */
01083         apply_ix1 = reflect_ix1_particle;
01084         break;
01085 
01086       case 2: /* Outflow */
01087         apply_ix1 = outflow_particle;
01088         break;
01089 
01090       case 4: /* Periodic */
01091         apply_ix1 = periodic_ix1_particle;
01092 #ifdef MPI_PARALLEL
01093         if(pG->lx1_id < 0 && pD->NGrid_x1 > 1){
01094           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01095           pG->lx1_id = pD->GridArray[kb][jb][pD->NGrid_x1-1].id;
01096         }
01097 #endif /* MPI_PARALLEL */
01098         break;
01099 
01100       default:
01101         ath_perr(-1,"[set_bvals_particle_init]: ibc_x1 = %d unknown\n",ibc_x1);
01102         exit(EXIT_FAILURE);
01103       }
01104 
01105     }
01106 
01107     if(apply_ox1 == NULL){
01108 
01109       obc_x1 = par_geti("grid","obc_x1");
01110       switch(obc_x1){
01111 
01112       case 1: /* Reflecting */
01113         apply_ox1 = reflect_ox1_particle;
01114         break;
01115       case 5: /* Reflecting */
01116         apply_ox1 = reflect_ox1_particle;
01117         break;
01118 
01119       case 2: /* Outflow */
01120         apply_ox1 = outflow_particle;
01121         break;
01122 
01123       case 4: /* Periodic */
01124         apply_ox1 = periodic_ox1_particle;
01125 #ifdef MPI_PARALLEL
01126         if(pG->rx1_id < 0 && pD->NGrid_x1 > 1){
01127           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01128           pG->rx1_id = pD->GridArray[kb][jb][0].id;
01129         }
01130 #endif /* MPI_PARALLEL */
01131         break;
01132 
01133       default:
01134         ath_perr(-1,"[set_bvals_particle_init]: obc_x1 = %d unknown\n",obc_x1);
01135         exit(EXIT_FAILURE);
01136       }
01137 
01138     }
01139   }
01140 
01141 /* Set function pointers for physical boundaries in x2-direction */
01142 
01143   if(pG->Nx2 > 1) {
01144     if(apply_ix2 == NULL){
01145 
01146       ibc_x2 = par_geti("grid","ibc_x2");
01147       switch(ibc_x2){
01148 
01149       case 1: /* Reflecting */
01150         apply_ix2 = reflect_ix2_particle;
01151         break;
01152       case 5: /* Reflecting */
01153         apply_ix2 = reflect_ix2_particle;
01154         break;
01155 
01156       case 2: /* Outflow */
01157         apply_ix2 = outflow_particle;
01158         break;
01159 
01160       case 4: /* Periodic */
01161         apply_ix2 = periodic_ix2_particle;
01162 #ifdef MPI_PARALLEL
01163         if(pG->lx2_id < 0 && pD->NGrid_x2 > 1){
01164           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01165           pG->lx2_id = pD->GridArray[kb][pD->NGrid_x2-1][ib].id;
01166         }
01167 #endif /* MPI_PARALLEL */
01168         break;
01169 
01170       default:
01171         ath_perr(-1,"[set_bvals_particle_init]: ibc_x2 = %d unknown\n",ibc_x2);
01172         exit(EXIT_FAILURE);
01173       }
01174 
01175     }
01176 
01177     if(apply_ox2 == NULL){
01178 
01179       obc_x2 = par_geti("grid","obc_x2");
01180       switch(obc_x2){
01181 
01182       case 1: /* Reflecting */
01183         apply_ox2 = reflect_ox2_particle;
01184         break;
01185       case 5: /* Reflecting */
01186         apply_ox2 = reflect_ox2_particle;
01187         break;
01188 
01189       case 2: /* Outflow */
01190         apply_ox2 = outflow_particle;
01191         break;
01192 
01193       case 4: /* Periodic */
01194         apply_ox2 = periodic_ox2_particle;
01195 #ifdef MPI_PARALLEL
01196         if(pG->rx2_id < 0 && pD->NGrid_x2 > 1){
01197           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01198           pG->rx2_id = pD->GridArray[kb][0][ib].id;
01199         }
01200 #endif /* MPI_PARALLEL */
01201         break;
01202 
01203       default:
01204         ath_perr(-1,"[set_bvals_particle_init]: obc_x2 = %d unknown\n",obc_x2);
01205         exit(EXIT_FAILURE);
01206       }
01207 
01208     }
01209   }
01210 
01211 /* Set function pointers for physical boundaries in x3-direction */
01212 
01213   if(pG->Nx3 > 1) {
01214     if(apply_ix3 == NULL){
01215 
01216       ibc_x3 = par_geti("grid","ibc_x3");
01217       switch(ibc_x3){
01218 
01219       case 1: /* Reflecting */
01220         apply_ix3 = reflect_ix3_particle;
01221         break;
01222       case 5: /* Reflecting */
01223         apply_ix3 = reflect_ix3_particle;
01224         break;
01225 
01226       case 2: /* Outflow */
01227         apply_ix3 = outflow_particle;
01228         break;
01229 
01230       case 4: /* Periodic */
01231         apply_ix3 = periodic_ix3_particle;
01232 #ifdef MPI_PARALLEL
01233         if(pG->lx3_id < 0 && pD->NGrid_x3 > 1){
01234           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01235           pG->lx3_id = pD->GridArray[pD->NGrid_x3-1][jb][ib].id;
01236         }
01237 #endif /* MPI_PARALLEL */
01238         break;
01239 
01240       default:
01241         ath_perr(-1,"[set_bvals_particle_init]: ibc_x3 = %d unknown\n",ibc_x3);
01242         exit(EXIT_FAILURE);
01243       }
01244 
01245     }
01246 
01247     if(apply_ox3 == NULL){
01248 
01249       obc_x3 = par_geti("grid","obc_x3");
01250       switch(obc_x3){
01251 
01252       case 1: /* Reflecting */
01253         apply_ox3 = reflect_ox3_particle;
01254         break;
01255       case 5: /* Reflecting */
01256         apply_ox3 = reflect_ox3_particle;
01257         break;
01258 
01259       case 2: /* Outflow */
01260         apply_ox3 = outflow_particle;
01261         break;
01262 
01263       case 4: /* Periodic */
01264         apply_ox3 = periodic_ox3_particle;
01265 #ifdef MPI_PARALLEL
01266         if(pG->rx3_id < 0 && pD->NGrid_x3 > 1){
01267           get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01268           pG->rx3_id = pD->GridArray[0][jb][ib].id;
01269         }
01270 #endif /* MPI_PARALLEL */
01271         break;
01272 
01273       default:
01274         ath_perr(-1,"[set_bvals_particle_init]: obc_x3 = %d unknown\n",obc_x3);
01275         exit(EXIT_FAILURE);
01276       }
01277 
01278     }
01279   }
01280 
01281   return;
01282 }
01283 
01284 /*----------------------------------------------------------------------------*/
01285 /*! \fn void set_bvals_particle_fun(enum Direction dir, VBCFun_t prob_bc)
01286  *  \brief Sets function pointers for user-defined BCs in problem file
01287  */
01288 
01289 void set_bvals_particle_fun(enum Direction dir, VBCFun_t prob_bc)
01290 {
01291   switch(dir){
01292   case left_x1:
01293     apply_ix1 = prob_bc;
01294     break;
01295   case right_x1:
01296     apply_ox1 = prob_bc;
01297     break;
01298   case left_x2:
01299     apply_ix2 = prob_bc;
01300     break;
01301   case right_x2:
01302     apply_ox2 = prob_bc;
01303     break;
01304   case left_x3:
01305     apply_ix3 = prob_bc;
01306     break;
01307   case right_x3:
01308     apply_ox3 = prob_bc;
01309     break;
01310   default:
01311     ath_perr(-1,"[set_bvals_particle_fun]: Unknown direction = %d\n",dir);
01312     exit(EXIT_FAILURE);
01313   }
01314   return;
01315 }
01316 
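/* Usage sketch for set_bvals_particle_fun(): a problem generator can install
 * its own particle BC before integration starts. The function names
 * my_ix1_particle_bc() and my_problem_init_sketch() are hypothetical and for
 * illustration only; a real problem file would make this call from its own
 * setup routine. */
#if 0   /* usage illustration; this code belongs in a problem file */
static void my_ix1_particle_bc(Grid *pG)
{
  /* a trivial user BC: leave the inner x1 ghost region without particles */
  return;
}

static void my_problem_init_sketch(Grid *pG, Domain *pD)
{
  /* replace the default inner x1 particle BC with the user function above */
  set_bvals_particle_fun(left_x1, my_ix1_particle_bc);
}
#endif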
01317 /*! \fn void set_bvals_particle_destruct(Grid *pG, Domain *pD)
01318  *  \brief Finalize the particle boundary conditions */
01319 void set_bvals_particle_destruct(Grid *pG, Domain *pD)
01320 {
01321   apply_ix1 = NULL;
01322   apply_ox1 = NULL;
01323   apply_ix2 = NULL;
01324   apply_ox2 = NULL;
01325   apply_ix3 = NULL;
01326   apply_ox3 = NULL;
01327   free(send_buf);
01328   free(recv_buf);
01329   send_bufsize = 0;
01330   recv_bufsize = 0;
01331   return;
01332 }
01333 
01334 /*=========================== PRIVATE FUNCTIONS ==============================*/
01335 /*----------------------------------------------------------------------------*/
01336 /* Following are the functions:
01337  *   realloc_sendbuf & realloc_recvbuf
01338  *   update_particle_status
01339  *   reflect_???_particle
01340  *   outflow_particle
01341  *   periodic_???_particle
01342  *   packing_???_particle
01343  *   unpack_particle
01344  *   shearing box related functions
01345  *   fargo related functions
01346  * where ???=[ix1,ox1,ix2,ox2,ix3,ox3]
01347  */
01348 
01349 /*----------------------------------------------------------------------------*/
01350 /*! \fn static void realloc_sendbuf()
01351  *  \brief Reallocate memory to send buffer */
01352 static void realloc_sendbuf()
01353 {
01354   send_bufsize += NBUF;
01355   ath_pout(1,"[set_bvals_particles]: reallocating send buffer...");
01356   if ((send_buf = (double*)realloc(send_buf,
01357                            NVAR_P*(send_bufsize)*sizeof(double))) == NULL)
01358     ath_error("[set_bvals_particle]: failed to allocate memory for buffer.\n");
01359 
01360   return;
01361 }
01362 
01363 /*----------------------------------------------------------------------------*/
01364 /*! \fn static void realloc_recvbuf()
01365  *  \brief Reallocate memory to receive buffer */
01366 static void realloc_recvbuf()
01367 {
01368   recv_bufsize += NBUF;
01369   ath_pout(1,"[set_bvals_particle]: reallocating receive buffer...");
01370   if ((recv_buf = (double*)realloc(recv_buf,
01371                            NVAR_P*(recv_bufsize)*sizeof(double))) == NULL)
01372     ath_error("[set_bvals_particle]: failed to allocate memory for buffer.\n");
01373 
01374   return;
01375 }
01376 
01377 /*----------------------------------------------------------------------------*/
01378 /*! \fn static void update_particle_status(Grid *pG)
01379  *  \brief Update the status of the particles after applying boundary conditions
01380  */
01381 static void update_particle_status(Grid *pG)
01382 {
01383   long p;
01384   Grain *cur;
01385 
01386   for (p=0; p<pG->nparticle; p++) {
01387     cur = &(pG->particle[p]);
01388     if (cur->pos >= 10) /* crossing out/in particle from the previous step */
01389     {
01390       if ((cur->x1>=x1upar) || (cur->x1<x1lpar) || (cur->x2>=x2upar) ||
01391           (cur->x2<x2lpar) || (cur->x3>=x3upar) || (cur->x3<x3lpar))
01392         cur->pos = 0; /* ghost particle */
01393 
01394       else
01395         cur->pos = 1; /* grid particle */
01396     }
01397   }
01398 
01399   return;
01400 }
01401 
01402 /*----------------------------------------------------------------------------*/
01403 /*! \fn static void reflect_ix1_particle(Grid *pG)
01404  *  \brief REFLECTING boundary conditions, Inner x1 boundary (ibc_x1=1) */
01405 static void reflect_ix1_particle(Grid *pG)
01406 {
01407   Real x1b2;    /* 2*(x1 border coordinate of grid cell pG->is) */
01408   long n, n0, p;
01409 
01410   /* compute twice the x1 coordinate of the inner x1 boundary */
01411   x1b2 = 2.0*(pG->x1_0 + (pG->is + pG->idisp)*pG->dx1);
01412 
01413   /* pack boundary particles */
01414   n = packing_ix1_particle(pG, nghost);
01415 
01416   /* get the rear of the particle list */
01417   n0 = pG->nparticle;
01418 
01419   /* copy boundary particles to the rear */
01420   unpack_particle(pG, send_buf, n);
01421 
01422   /* apply reflection boundary condition */
01423   for (p=n0; p<pG->nparticle; p++)
01424   {
01425     pG->particle[p].x1 = x1b2 - pG->particle[p].x1;
01426     pG->particle[p].v1 = -pG->particle[p].v1;
01427   }
01428 
01429   return;
01430 }
01431 
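/* A note on the mapping above: with the inner x1 face at x1b = x1b2/2, a
 * packed particle at x1 = x1b + d is mirrored into the ghost zone at
 * x1 = x1b2 - (x1b + d) = x1b - d, and its normal velocity is reversed.
 * As a purely illustrative example, for x1b = 0 a particle at x1 = 0.3 with
 * v1 = +1.2 produces a ghost image at x1 = -0.3 with v1 = -1.2.  The same
 * construction is used in the other five reflecting routines below.
 */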
01432 /*----------------------------------------------------------------------------*/
01433 /*! \fn static void reflect_ox1_particle(Grid *pG)
01434  *  \brief REFLECTING boundary conditions, Outer x1 boundary (obc_x1=1)
01435  */
01436 static void reflect_ox1_particle(Grid *pG)
01437 {
01438   Real x1b2;    /* 2*(x1 border coordinate of grid cell pG->ie) */
01439   long n, n0, p;
01440 
01441   /* compute twice the x1 coordinate of the outer x1 boundary */
01442   x1b2 = 2.0*(pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1);
01443 
01444   /* pack boundary particles */
01445   n = packing_ox1_particle(pG, nghost);
01446 
01447   /* get the rear of the particle list */
01448   n0 = pG->nparticle;
01449 
01450   /* copy boundary particles to the rear */
01451   unpack_particle(pG, send_buf, n);
01452 
01453   /* apply reflection boundary condition */
01454   for (p=n0; p<pG->nparticle; p++)
01455   {
01456     pG->particle[p].x1 = x1b2 - pG->particle[p].x1;
01457     pG->particle[p].v1 = -pG->particle[p].v1;
01458   }
01459 
01460   return;
01461 }
01462 
01463 /*----------------------------------------------------------------------------*/
01464 /*! \fn static void reflect_ix2_particle(Grid *pG)
01465  *  \brief REFLECTING boundary conditions, Inner x2 boundary (ibc_x2=1)
01466  */
01467 static void reflect_ix2_particle(Grid *pG)
01468 {
01469   Real x2b2;    /* 2*(x2 border coordinate of grid cell pG->js) */
01470   long n, n0, p;
01471 
01472   /* compute twice the x2 coordinate of the inner x2 boundary */
01473   x2b2 = 2.0*(pG->x2_0 + (pG->js + pG->jdisp)*pG->dx2);
01474 
01475   /* pack boundary particles */
01476   n = packing_ix2_particle(pG, nghost);
01477 
01478   /* get the rear of the particle list */
01479   n0 = pG->nparticle;
01480 
01481   /* copy boundary particles to the rear */
01482   unpack_particle(pG, send_buf, n);
01483 
01484   /* apply reflection boundary condition */
01485   for (p=n0; p<pG->nparticle; p++)
01486   {
01487     pG->particle[p].x2 = x2b2 - pG->particle[p].x2;
01488     pG->particle[p].v2 = -pG->particle[p].v2;
01489   }
01490 
01491   return;
01492 }
01493 
01494 /*----------------------------------------------------------------------------*/
01495 /*! \fn static void reflect_ox2_particle(Grid *pG)
01496  *  \brief REFLECTING boundary conditions, Outer x2 boundary (obc_x2=1)
01497  */
01498 static void reflect_ox2_particle(Grid *pG)
01499 {
01500   Real x2b2;    /* 2*(x2 border coordinate of grid cell pG->je) */
01501   long n, n0, p;
01502 
01503   /* compute twice the x2 coordinate of the outer x2 boundary */
01504   x2b2 = 2.0*(pG->x2_0 + (pG->je+1 + pG->jdisp)*pG->dx2);
01505 
01506   /* pack boundary particles */
01507   n = packing_ox2_particle(pG, nghost);
01508 
01509   /* get the rear of the particle list */
01510   n0 = pG->nparticle;
01511 
01512   /* copy boundary particles to the rear */
01513   unpack_particle(pG, send_buf, n);
01514 
01515   /* apply reflection boundary condition */
01516   for (p=n0; p<pG->nparticle; p++)
01517   {
01518     pG->particle[p].x2 = x2b2 - pG->particle[p].x2;
01519     pG->particle[p].v2 = -pG->particle[p].v2;
01520   }
01521 
01522   return;
01523 }
01524 
01525 /*----------------------------------------------------------------------------*/
01526 /*! \fn static void reflect_ix3_particle(Grid *pG)
01527  *  \brief REFLECTING boundary conditions, Inner x3 boundary (ibc_x3=1)
01528  */
01529 static void reflect_ix3_particle(Grid *pG)
01530 {
01531   Real x3b2;    /* 2*(x3 border coordinate of grid cell pG->ks) */
01532   long n, n0, p;
01533 
01534   /* compute twice the x3 coordinate of the inner x3 boundary */
01535   x3b2 = 2.0*(pG->x3_0 + (pG->ks + pG->kdisp)*pG->dx3);
01536 
01537   /* pack boundary particles */
01538   n = packing_ix3_particle(pG, nghost);
01539 
01540   /* get the rear of the particle list */
01541   n0 = pG->nparticle;
01542 
01543   /* copy boundary particles to the rear */
01544   unpack_particle(pG, send_buf, n);
01545 
01546   /* apply reflection boundary condition */
01547   for (p=n0; p<pG->nparticle; p++)
01548   {
01549     pG->particle[p].x3 = x3b2 - pG->particle[p].x3;
01550     pG->particle[p].v3 = -pG->particle[p].v3;
01551   }
01552 
01553   return;
01554 }
01555 
01556 /*----------------------------------------------------------------------------*/
01557 /*! \fn static void reflect_ox3_particle(Grid *pG)
01558  *  \brief REFLECTING boundary conditions, Outer x3 boundary (obc_x3=1)
01559  */
01560 static void reflect_ox3_particle(Grid *pG)
01561 {
01562   Real x3b2;    /* 2*(x3 border coordinate of grid cell pG->ke) */
01563   long n, n0, p;
01564 
01565   /* compute twice the x3 coordinate of the outer x3 boundary */
01566   x3b2 = 2.0*(pG->x3_0 + (pG->ke+1 + pG->kdisp)*pG->dx3);
01567 
01568   /* pack boundary particles */
01569   n = packing_ox3_particle(pG, nghost);
01570 
01571   /* get the rear of the particle list */
01572   n0 = pG->nparticle;
01573 
01574   /* copy boundary particles to the rear */
01575   unpack_particle(pG, send_buf, n);
01576 
01577 
01578   /* apply reflection boundary condition */
01579   for (p=n0; p<pG->nparticle; p++)
01580   {
01581     pG->particle[p].x3 = x3b2 - pG->particle[p].x3;
01582     pG->particle[p].v3 = -pG->particle[p].v3;
01583   }
01584 
01585   return;
01586 }
01587 
01588 /*----------------------------------------------------------------------------*/
01589 /*! \fn static void outflow_particle(Grid *pG)
01590  *  \brief OUTFLOW boundary conditions
01591  *
01592  * For particles, outflow B.C. = No B.C.  We only remove particles in the
01593  * outermost layer of the ghost cells, which is done in remove_ghost_particle(),
01594  * see particle.c.
01595  */
01596 static void outflow_particle(Grid *pG)
01597 {
01598   return;
01599 }
01600 
01601 /*----------------------------------------------------------------------------*/
01602 /*! \fn static void periodic_ix1_particle(Grid *pG)
01603  *  \brief PERIODIC boundary conditions, Inner x1 boundary (ibc_x1=4)
01604  *
01605  * Note: 2D shearing box B.C. is considered here!
01606  */
01607 static void periodic_ix1_particle(Grid *pG)
01608 {
01609   long n = 0;
01610 #ifdef SHEARING_BOX
01611   /* index for vy, 5 for 3D (x1,x2,x3)=(X,Y,Z), 6 for 2D (x1,x2,x3)=(X,Z,Y) */
01612   int vyind;
01613 
01614   if (pG->Nx3 > 1) /* 3D shearing box (x1,x2,x3)=(X,Y,Z) */
01615     vyind = 5;
01616   else             /* 2D shearing box (x1,x2,x3)=(X,Z,Y) */
01617     vyind = 6;
01618 #endif
01619 
01620   /* pack boundary particles */
01621   n = packing_ox1_particle(pG, nghost);
01622 
01623   /* shift the particles */
01624   shift_packed_particle(send_buf, n, 1, -Lx1);
01625 
01626 #ifdef SHEARING_BOX
01627 #ifndef FARGO
01628   /* velocity shift for shearing box */
01629   shift_packed_particle(send_buf, n, vyind, vshear);
01630 #endif
01631 #endif /* SHEARING_BOX */
01632 
01633   /* copy boundary particles to the rear */
01634   unpack_particle(pG, send_buf, n);
01635 
01636   return;
01637 }
01638 
01639 /*----------------------------------------------------------------------------*/
01640 /*! \fn static void periodic_ox1_particle(Grid *pG)
01641  *  \brief PERIODIC boundary conditions, Outer x1 boundary (obc_x1=4)
01642  *
01643  * Note: 2D shearing box B.C. is considered here!
01644  */
01645 static void periodic_ox1_particle(Grid *pG)
01646 {
01647   long n = 0;
01648 #ifdef SHEARING_BOX
01649   /* index for vy, 5 for 3D (x1,x2,x3)=(X,Y,Z), 6 for 2D (x1,x2,x3)=(X,Z,Y) */
01650   int vyind;
01651 
01652   if (pG->Nx3 > 1) /* 3D shearing box (x1,x2,x3)=(X,Y,Z) */
01653     vyind = 5;
01654   else             /* 2D shearing box (x1,x2,x3)=(X,Z,Y) */
01655     vyind = 6;
01656 #endif
01657 
01658   /* pack boundary particles */
01659   n = packing_ix1_particle(pG, nghost);
01660 
01661   /* shift the particles */
01662   shift_packed_particle(send_buf, n, 1, Lx1);
01663 
01664 #ifdef SHEARING_BOX
01665 #ifndef FARGO
01666   /* velocity shift for shearing box */
01667   shift_packed_particle(send_buf, n, vyind, -vshear);
01668 #endif
01669 #endif /* SHEARING_BOX */
01670 
01671   /* copy boundary particles to the rear */
01672   unpack_particle(pG, send_buf, n);
01673 
01674   return;
01675 }
01676 
01677 /*----------------------------------------------------------------------------*/
01678 /*! \fn static void periodic_ix2_particle(Grid *pG)
01679  *  \brief PERIODIC boundary conditions, Inner x2 boundary (ibc_x2=4)
01680  */
01681 static void periodic_ix2_particle(Grid *pG)
01682 {
01683   long n = 0;
01684 
01685   /* pack boundary particles */
01686   n = packing_ox2_particle(pG, nghost);
01687 
01688   /* shift the particles */
01689   shift_packed_particle(send_buf, n, 2, -Lx2);
01690 
01691   /* copy boundary particles to the rear */
01692   unpack_particle(pG, send_buf, n);
01693 
01694   return;
01695 }
01696 
01697 /*----------------------------------------------------------------------------*/
01698 /*! \fn static void periodic_ox2_particle(Grid *pG)
01699  *  \brief PERIODIC boundary conditions, Outer x2 boundary (obc_x2=4)
01700  */
01701 static void periodic_ox2_particle(Grid *pG)
01702 {
01703   long n = 0;
01704 
01705   /* pack boundary particles */
01706   n = packing_ix2_particle(pG, nghost);
01707 
01708   /* shift the particles */
01709   shift_packed_particle(send_buf, n, 2, Lx2);
01710 
01711   /* copy boundary particles to the rear */
01712   unpack_particle(pG, send_buf, n);
01713 
01714   return;
01715 }
01716 
01717 /*----------------------------------------------------------------------------*/
01718 /*! \fn static void periodic_ix3_particle(Grid *pG)
01719  *  \brief PERIODIC boundary conditions, Inner x3 boundary (ibc_x3=4)
01720  */
01721 static void periodic_ix3_particle(Grid *pG)
01722 {
01723   long n = 0;
01724 
01725   /* pack boundary particles */
01726   n = packing_ox3_particle(pG, nghost);
01727 
01728   /* shift the particles */
01729   shift_packed_particle(send_buf, n, 3, -Lx3);
01730 
01731   /* copy boundary particles to the rear */
01732   unpack_particle(pG, send_buf, n);
01733 
01734   return;
01735 }
01736 
01737 /*----------------------------------------------------------------------------*/
01738 /*! \fn static void periodic_ox3_particle(Grid *pG)
01739  *  \brief PERIODIC boundary conditions, Outer x3 boundary (obc_x3=4)
01740  */
01741 static void periodic_ox3_particle(Grid *pG)
01742 {
01743   long n = 0;
01744 
01745   /* pack boundary particles */
01746   n = packing_ix3_particle(pG, nghost);
01747 
01748   /* shift the particles */
01749   shift_packed_particle(send_buf, n, 3, Lx3);
01750 
01751   /* copy boundary particles to the rear */
01752   unpack_particle(pG, send_buf, n);
01753 
01754   return;
01755 }
01756 
01757 /*----------------------------------------------------------------------------*/
01758 /*! \fn static long packing_ix1_particle(Grid *pG, int nlayer)
01759  *  \brief Packing the particle inside the inner x1 boundary
01760  *
01761  * Input: pG: grid;
01762  *   nlayer: number of layers to be packed in the boundary
01763  * Output:
01764  *   send_buf: buffer to save packed particle
01765  *   return: number of packed particle
01766  */
01767 static long packing_ix1_particle(Grid *pG, int nlayer)
01768 {
01769   Grain *cur;   /* current pointer */
01770   Real x1l,x1u; /* lower and upper coordinate limit in x1 inner boundary */
01771   long p, n = 0;
01772   double *pd = send_buf;
01773 
01774   /* get lower and upper coordinate limit in x1 inner boundary */
01775   x1l = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
01776   x1u = pG->x1_0 + (pG->is+nlayer + pG->idisp)*pG->dx1;
01777 
01778   /* loop over all particle to pack the ones in the boundary */
01779   for (p=0; p<pG->nparticle; p++) {
01780     cur = &(pG->particle[p]);
01781 
01782     if (cur->x1 < x1u) {
01783       if ((cur->pos == 0) || (cur->pos == 1))
01784       { /* ghost particle or grid particle */
01785         if (cur->x1 >= x1l) {/* in the boundary */
01786           packing_one_particle(cur, n, 0); /* pack as ghost particle */
01787           n += 1;
01788         }
01789       }
01790 
01791       else if (cur->pos != 21) /* it's not from ox1 */
01792       {/* crossing particle in the boundary */
01793         /* pack as crossing particle from ix1 */
01794         packing_one_particle(cur, n, 11);
01795 
01796         n += 1;
01797       }
01798     }
01799   }
01800 
01801   return n;
01802 }
01803 
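/* Status codes used by this and the five analogous packing routines below:
 * pos = 0 marks a ghost particle, pos = 1 a grid particle, and pos = 1d/2d
 * a particle that crossed the inner/outer boundary in direction d
 * (11/21 for x1, 12/22 for x2, 13/23 for x3).  A crossing particle is never
 * packed back toward the boundary it came from, hence the pos != 21 test
 * above.
 */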
01804 /*----------------------------------------------------------------------------*/
01805 /*! \fn static long packing_ox1_particle(Grid *pG, int nlayer)
01806  *  \brief Packing the particle inside the outer x1 boundary
01807  *
01808  * Input: pG: grid;
01809  *   nlayer: number of layers to be packed in the boundary
01810  * Output:
01811  *   send_buf: buffer to save packed particle
01812  *   return: number of packed particle
01813  */
01814 static long packing_ox1_particle(Grid *pG, int nlayer)
01815 {
01816   Grain *cur;   /* current pointer */
01817   Real x1l,x1u; /* lower and upper coordinate limit in x1 outer boundary */
01818   long p, n = 0;
01819   double *pd = send_buf;
01820 
01821   /* get lower and upper coordinate limit in x1 outer boundary */
01822   x1l = pG->x1_0 + (pG->ie-nlayer+1 + pG->idisp)*pG->dx1;
01823   x1u = pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1;
01824 
01825   /* loop over all particle to pack the ones in the boundary */
01826   for (p=0; p<pG->nparticle; p++) {
01827     cur = &(pG->particle[p]);
01828 
01829     if (cur->x1 >= x1l) {
01830       if ((cur->pos == 0) || (cur->pos == 1))
01831       { /* ghost particle or grid particle */
01832         if (cur->x1 < x1u) {/* in the boundary */
01833           packing_one_particle(cur, n, 0); /* pack as ghost particle */
01834           n += 1;
01835         }
01836       }
01837       else if (cur->pos != 11) /* it's not from ix1 */
01838       {/* crossing particle in the boundary */
01839         /* pack as crossing particle from ox1 */
01840         packing_one_particle(cur, n, 21);
01841         n += 1;
01842       }
01843     }
01844   }
01845 
01846   return n;
01847 }
01848 
01849 /*----------------------------------------------------------------------------*/
01850 /*! \fn static long packing_ix2_particle(Grid *pG, int nlayer)
01851  *  \brief Packing the particle inside the inner x2 boundary
01852  *
01853  * Input: pG: grid;
01854  *   nlayer: number of layers to be packed in the boundary
01855  * Output:
01856  *   send_buf: buffer to save packed particle
01857  *   return: number of packed particle
01858  */
01859 static long packing_ix2_particle(Grid *pG, int nlayer)
01860 {
01861   Grain *cur;   /* current pointer */
01862   Real x2l,x2u; /* lower and upper coordinate limit in x2 inner boundary */
01863   long p, n = 0;
01864   double *pd = send_buf;
01865 
01866   /* get lower and upper coordinate limit in x2 inner boundary */
01867   x2l = pG->x2_0 + (pG->js + pG->jdisp)*pG->dx2;
01868   x2u = pG->x2_0 + (pG->js+nlayer + pG->jdisp)*pG->dx2;
01869 
01870   /* loop over all particle to pack the ones in the boundary */
01871   for (p=0; p<pG->nparticle; p++) {
01872     cur = &(pG->particle[p]);
01873 
01874     if (cur->x2 < x2u) {
01875       if ((cur->pos == 0) || (cur->pos == 1))
01876       { /* ghost particle or grid particle */
01877         if (cur->x2 >= x2l) {/* in the boundary */
01878           packing_one_particle(cur, n, 0); /* pack as ghost particle */
01879           n += 1;
01880         }
01881       }
01882       else if (cur->pos != 22) /* it's not from ox2 */
01883       {/* crossing particle in the boundary */
01884         /* pack as crossing particle from ix2 */
01885         packing_one_particle(cur, n, 12); 
01886         n += 1;
01887       }
01888     }
01889   }
01890 
01891   return n;
01892 }
01893 
01894 /*----------------------------------------------------------------------------*/
01895 /*! \fn static long packing_ox2_particle(Grid *pG, int nlayer)
01896  *  \brief Packing the particle inside the outer x2 boundary
01897  *
01898  * Input: pG: grid;
01899  *   nlayer: number of layers to be packed in the boundary
01900  * Output:
01901  *   send_buf: buffer to save packed particle
01902  *   return: number of packed particle
01903  */
01904 static long packing_ox2_particle(Grid *pG, int nlayer)
01905 {
01906   Grain *cur;   /* current pointer */
01907   Real x2l,x2u; /* lower and upper coordinate limit in x2 outer boundary */
01908   long p, n = 0;
01909   double *pd = send_buf;
01910 
01911   /* get lower and upper coordinate limit in x2 outer boundary */
01912   x2l = pG->x2_0 + (pG->je-nlayer+1 + pG->jdisp)*pG->dx2;
01913   x2u = pG->x2_0 + (pG->je+1 + pG->jdisp)*pG->dx2;
01914 
01915   /* loop over all particle to pack the ones in the boundary */
01916   for (p=0; p<pG->nparticle; p++) {
01917     cur = &(pG->particle[p]);
01918 
01919     if (cur->x2 >= x2l) {
01920       if ((cur->pos == 0) || (cur->pos == 1))
01921       { /* ghost particle or grid particle */
01922         if (cur->x2 < x2u) {/* in the boundary */
01923           packing_one_particle(cur, n, 0); /* pack as ghost particle */
01924           n += 1;
01925         }
01926       }
01927       else if (cur->pos != 12) /* it's not from ix2 */
01928       {/* crossing particle in the boundary */
01929         /* pack as crossing particle from ox2 */
01930         packing_one_particle(cur, n, 22);
01931         n += 1;
01932       }
01933     }
01934   }
01935 
01936   return n;
01937 }
01938 
01939 /*----------------------------------------------------------------------------*/
01940 /*! \fn static long packing_ix3_particle(Grid *pG, int nlayer)
01941  *  \brief Packing the particle inside the inner x3 boundary
01942  *
01943  * Input: pG: grid;
01944  *   nlayer: number of layers to be packed in the boundary
01945  * Output:
01946  *   send_buf: buffer to save packed particle
01947  *   return: number of packed particle
01948  */
01949 static long packing_ix3_particle(Grid *pG, int nlayer)
01950 {
01951   Grain *cur;   /* current pointer */
01952   Real x3l,x3u; /* lower and upper coordinate limit in x3 inner boundary */
01953   long p, n = 0;
01954   double *pd = send_buf;
01955 
01956   /* get lower and upper coordinate limit in x3 inner boundary */
01957   x3l = pG->x3_0 + (pG->ks + pG->kdisp)*pG->dx3;
01958   x3u = pG->x3_0 + (pG->ks+nlayer + pG->kdisp)*pG->dx3;
01959 
01960   /* loop over all particle to pack the ones in the boundary */
01961   for (p=0; p<pG->nparticle; p++) {
01962     cur = &(pG->particle[p]);
01963 
01964     if (cur->x3 < x3u) {
01965       if ((cur->pos == 0) || (cur->pos == 1))
01966       { /* ghost particle or grid particle */
01967         if (cur->x3 >= x3l) {/* in the boundary */
01968           packing_one_particle(cur, n, 0); /* pack as ghost particle */
01969           n += 1;
01970         }
01971       }
01972       else if (cur->pos != 23) /* it's not from ox3 */
01973       {/* crossing particle in the boundary */
01974         /* pack as crossing particle from ix3 */
01975         packing_one_particle(cur, n, 13); 
01976         n += 1;
01977       }
01978     }
01979   }
01980   return n;
01981 }
01982 
01983 /*----------------------------------------------------------------------------*/
01984 /*! \fn static long packing_ox3_particle(Grid *pG, int nlayer)
01985  *  \brief Packing the particle inside the outer x3 boundary
01986  *
01987  * Input: pG: grid;
01988  *   nlayer: number of layers to be packed in the boundary
01989  * Output:
01990  *   send_buf: buffer to save packed particle
01991  *   return: number of packed particle
01992  */
01993 static long packing_ox3_particle(Grid *pG, int nlayer)
01994 {
01995   Grain *cur;   /* current pointer */
01996   Real x3l,x3u; /* lower and upper coordinate limit in x3 outer boundary */
01997   long p, n = 0;
01998 
01999   /* get lower and upper coordinate limit in x3 outer boundary */
02000   x3l = pG->x3_0 + (pG->ke-nlayer+1 + pG->kdisp)*pG->dx3;
02001   x3u = pG->x3_0 + (pG->ke+1 + pG->kdisp)*pG->dx3;
02002 
02003   /* loop over all particle to pack the ones in the boundary */
02004   for (p=0; p<pG->nparticle; p++) {
02005     cur = &(pG->particle[p]);
02006 
02007     if (cur->x3 >= x3l) {
02008       if ((cur->pos == 0) || (cur->pos == 1))
02009       { /* ghost particle or grid particle */
02010         if (cur->x3 < x3u) {/* in the boundary */
02011           packing_one_particle(cur, n, 0); /* pack as ghost particle */
02012           n += 1;
02013         }
02014       }
02015       else if (cur->pos != 13) /* it's not from ix3 */
02016       {/* crossing particle in the boundary */
02017         /* pack as crossing particle from ox3 */
02018         packing_one_particle(cur, n, 23);
02019         n += 1;
02020       }
02021     }
02022   }
02023   return n;
02024 }
02025 
02026 /*----------------------------------------------------------------------------*/
02027 /*! \fn static void packing_one_particle(Grain *cur, long n, short pos)
02028  *  \brief Subroutine for packing one particle to send buffer
02029  *
02030  * Input:
02031  *   cur: particle pointer;
02032  *   n: record index in the buffer (the record starts at send_buf[NVAR_P*n])
02033  *   pos: particle status code (0: ghost; 1: grid; 1d/2d: crossed the inner/outer boundary in direction d)
02034  * Output:
02035  *   one particle is added to the send buffer
02036  */
02037 static void packing_one_particle(Grain *cur, long n, short pos)
02038 {
02039   double *pd;
02040   if ((n+2) > send_bufsize) {
02041     realloc_sendbuf();
02042   }
02043   pd = &(send_buf[NVAR_P*n]);
02044 
02045   /* pack the particle */
02046   *(pd++) = cur->x1;
02047   *(pd++) = cur->x2;
02048   *(pd++) = cur->x3;
02049   *(pd++) = cur->v1;
02050   *(pd++) = cur->v2;
02051   *(pd++) = cur->v3;
02052   *(pd++) = (double)(cur->property)+0.01;
02053   *(pd++) = (double)(pos)+0.01;
02054   *(pd++) = (double)(cur->my_id)+0.01;
02055 #ifdef MPI_PARALLEL
02056   *(pd++) = (double)(cur->init_id)+0.01;
02057 #endif
02058 
02059   return;
02060 }
02061 
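/* Buffer layout: record n occupies send_buf[NVAR_P*n .. NVAR_P*(n+1)-1], in
 * the order
 *   x1, x2, x3, v1, v2, v3, property, pos, my_id (, init_id with MPI),
 * so NVAR_P doubles per particle.  The integer fields are stored as doubles
 * with 0.01 added so that the truncating casts in unpack_particle() recover
 * the original integer values.
 */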
02062 /*----------------------------------------------------------------------------*/
02063 /*! \fn static void shift_packed_particle(double *buf, long n, int index, 
02064  *                                        double shift)
02065  *  \brief shift the coordinate/velocity of the packed particles by a constant 
02066  *  amount
02067  *
02068  * Input:
02069  *   buf: buffer;       n: number of particles in the buffer
02070  *   index: 1: x1; 2: x2; 3: x3; 4: v1; 5: v2; 6: v3;
02071  *   shift: amount of change.
02072  * Output:
02073  *   buf: buffer with shifted particles
02074  */
02075 static void shift_packed_particle(double *buf, long n, int index, double shift)
02076 {
02077   double *pd = buf;
02078   long i;
02079 
02080   if (n <= 0) return;   /* nothing packed, nothing to shift */
02081   pd += index-1;
02082   for (i=0; i<n; i++) {
02083     *pd += shift;
02084     pd += NVAR_P;
02085   }
02086 
02087   return;
02088 }
02089 
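/* Usage sketch (taken from periodic_ix1_particle above): after packing n
 * particles near the outer x1 boundary, a periodic wrap to the inner side
 * shifts the stored x1 field (index 1) by -Lx1, and a non-FARGO shearing
 * box additionally shifts the azimuthal velocity (index 5 in 3D) by
 * +vshear:
 *
 *   shift_packed_particle(send_buf, n, 1, -Lx1);
 *   shift_packed_particle(send_buf, n, 5,  vshear);
 */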
02090 /*----------------------------------------------------------------------------*/
02091 /*! \fn static void unpack_particle(Grid *pG, double *buf, long n)
02092  *  \brief Unpack received particle
02093  * Input:
02094  *   pG: grid;
02095  *   buf: received buffer
02096  *   n: number of particle in the buffer
02097  * Output:
02098  *   pG: grid with new particle added.
02099  */
02100 static void unpack_particle(Grid *pG, double *buf, long n)
02101 {
02102   Grain *cur;           /* current pointer */
02103   Grain *newgr;         /* space for new particle */
02104   double *pd = buf;
02105   long p, i;
02106 
02107   /* initialization */
02108   p = pG->nparticle;
02109   pG->nparticle += n;
02110   if (pG->nparticle >= pG->arrsize)
02111     particle_realloc(pG, pG->nparticle+1);
02112   /* unpacking */
02113   for (i=p; i<pG->nparticle; i++) {
02114     cur = &(pG->particle[i]);
02115     cur->x1 = *(pd++);
02116     cur->x2 = *(pd++);
02117     cur->x3 = *(pd++);
02118     cur->v1 = *(pd++);
02119     cur->v2 = *(pd++);
02120     cur->v3 = *(pd++);
02121     cur->property = (int)(*(pd++));
02122     pG->grproperty[cur->property].num += 1;
02123     cur->pos = (short)(*(pd++));
02124     cur->my_id = (long)(*(pd++));
02125 #ifdef MPI_PARALLEL
02126     cur->init_id = (int)(*(pd++));
02127 #endif
02128   }
02129 
02130   return;
02131 }
02132 
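/* MPI usage sketch (illustrative; src, tag, cnt and stat are placeholder
 * names): the receiving side first learns the particle count, then receives
 * cnt*NVAR_P doubles and appends them to the local list:
 *
 *   if ((cnt+1) >= recv_bufsize) realloc_recvbuf();
 *   MPI_Recv(recv_buf, cnt*NVAR_P, MPI_DOUBLE, src, tag,
 *            MPI_COMM_WORLD, &stat);
 *   unpack_particle(pG, recv_buf, cnt);
 */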
02133 #ifdef SHEARING_BOX
02134 /*----------------------------------------------------------------------------*/
02135 /*! \fn static void shearingbox_ix1_particle(Grid *pG, Domain *pD, long numpar)
02136  *  \brief Shearing box boundary condition, Inner x1 boundary (ibc_x1=4)
02137  *
02138  * This routine works only in 3D.
02139  * Input: numpar: for the packing routine, the array index to start with.
02140  */
02141 static void shearingbox_ix1_particle(Grid *pG, Domain *pD, long numpar)
02142 {
02143 #ifdef MPI_PARALLEL
02144   /* amount of shear, whole (yshear) and fractional (yshift) */
02145   Real yshear, yshift;
02146   Real x2len;                   /* length in x2 (y) direction of a grid */
02147   int id1s, id1r, id2s, id2r;   /* processor ids to send and receive data */
02148   int ind1, ind2, ind3;         /* x1,x2,x3 indices of grid in the domain */
02149   long n1, n2;                  /* number of packed particles */
02150   long cnt_recv;                /* received number of particles */
02151   int err;
02152   MPI_Request rq;
02153   MPI_Status stat;
02154 #endif /* MPI_PARALLEL */
02155 
02156 #ifndef MPI_PARALLEL
02157   packing_ix1_particle_shear(pG, 0, numpar);
02158 #else
02159 /*----------------- MPI case: Step 1. Find locations -------------------------*/
02160 
02161   /* compute the distance the computational domain has sheared in y */
02162   yshear = vshear*pG->time;
02163   yshift = fmod(yshear, Lx2);
02164   x2len = pG->dx2*pG->Nx2;
02165 
02166   /* obtain processor ids to send and receive */
02167   ind1 = 0;             ind3 = my_kproc;
02168   ind2 = my_jproc + (int)(yshift/x2len) + 1;
02169   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02170   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02171   id1s = pD->GridArray[ind3][ind2][ind1].id;    /* for region I (send to) */
02172 
02173   ind2 = my_jproc + (int)(yshift/x2len);
02174   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02175   id2s = pD->GridArray[ind3][ind2][ind1].id;    /* for region II (send to) */
02176 
02177   ind2 = my_jproc - (int)(yshift/x2len) - 1;
02178   if (ind2 < 0) ind2 += pD->NGrid_x2;
02179   if (ind2 < 0) ind2 += pD->NGrid_x2;
02180   id1r = pD->GridArray[ind3][ind2][ind1].id;  /* for region I (receive from) */
02181 
02182   ind2 = my_jproc - (int)(yshift/x2len);
02183   if (ind2 < 0) ind2 += pD->NGrid_x2;
02184   id2r = pD->GridArray[ind3][ind2][ind1].id;  /* for region II (receive from) */
02185 
02186 /*------------------MPI case: Step 2. Exchange particles ---------------------*/
02187 
02188   /* packing particles at region I */
02189   n1 = packing_ix1_particle_shear(pG, 1, numpar);
02190 
02191 /*-------- sub-step 1: exchange region I (id1) --------*/
02192 
02193   /* send and receive buffer size to/from region I (id1) */
02194   /* Post a non-blocking receive for the data size */
02195   err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id1r,
02196                              boundary_particle_tag, MPI_COMM_WORLD, &rq);
02197   if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02198 
02199   /* send buffer size */
02200   err = MPI_Send(&n1, 1, MPI_LONG, id1s, boundary_particle_tag, MPI_COMM_WORLD);
02201   if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
02202 
02203   /* receive buffer size from */
02204   err = MPI_Wait(&rq, &stat);
02205   if(err) ath_error("[receive_ix1_particle_shear]: MPI_Wait error = %d\n",err);
02206 
02207   /* check space for the receive buffer */
02208   if ((cnt_recv+1) >= recv_bufsize)
02209     realloc_recvbuf();
02210 
02211   /* send and receive buffer to/from region I (id1) */
02212   if (cnt_recv > 0) {
02213     /* Post a non-blocking receive for the data from outer region I */
02214     err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id1r,
02215                               boundary_particle_tag, MPI_COMM_WORLD, &rq);
02216     if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02217   }
02218 
02219   if (n1 > 0) {
02220     /* send buffer */
02221     err = MPI_Send(send_buf, n1*NVAR_P, MPI_DOUBLE, id1s,
02222                              boundary_particle_tag, MPI_COMM_WORLD);
02223     if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
02224   }
02225 
02226   /* packing particles at region II (N.B.!) */
02227   n2 = packing_ix1_particle_shear(pG, 2, numpar);
02228   if (cnt_recv > 0) {
02229     /* receive buffer */
02230     err = MPI_Wait(&rq, &stat);
02231     if(err) ath_error("[recv_ix1_particle_shear]: MPI_Wait error = %d\n",err);
02232 
02233     /* unpack the received particles */
02234     unpack_particle(pG, recv_buf, cnt_recv);
02235   }
02236 
02237 /*-------- sub-step 2: exchange region II (id2)  --------*/
02238 
02239   /* send and receive buffer size to/from region II (id2) */
02240   /* Post a non-blocking receive for the data size */
02241   err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id2r,
02242                              boundary_particle_tag, MPI_COMM_WORLD, &rq);
02243   if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02244 
02245   /* send buffer size */
02246   err = MPI_Send(&n2, 1, MPI_LONG, id2s, boundary_particle_tag, MPI_COMM_WORLD);
02247   if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
02248 
02249   /* receive buffer size */
02250   err = MPI_Wait(&rq, &stat);
02251   if(err) ath_error("[receive_ix1_particle_shear]: MPI_Wait error = %d\n",err);
02252 
02253   /* check space for the receive buffer */
02254   if ((cnt_recv+1) >= recv_bufsize)
02255     realloc_recvbuf();
02256 
02257   /* send and receive buffer to/from region II (id2) */
02258   if (cnt_recv > 0) {
02259     /* Post a non-blocking receive for the data */
02260     err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id2r,
02261                               boundary_particle_tag, MPI_COMM_WORLD, &rq);
02262     if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02263   }
02264 
02265   if (n2 > 0) {
02266     /* send buffer */
02267     err = MPI_Send(send_buf, n2*NVAR_P, MPI_DOUBLE, id2s,
02268                              boundary_particle_tag, MPI_COMM_WORLD);
02269     if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
02270   }
02271 
02272   if (cnt_recv > 0) {
02273     /* receive buffer */
02274     err = MPI_Wait(&rq, &stat);
02275     if(err) ath_error("[recv_ix1_particle_shear]: MPI_Wait error = %d\n",err);
02276 
02277     /* unpack the received particles */
02278     unpack_particle(pG, recv_buf, cnt_recv);
02279   }
02280 
02281 #endif /* MPI_PARALLEL */
02282 
02283   return;
02284 }
02285 
02286 /*----------------------------------------------------------------------------*/
02287 /*! \fn static void shearingbox_ox1_particle(Grid *pG, Domain *pD, long numpar)
02288  *  \brief Shearing box boundary condition, Outer x1 boundary (obc_x1=4)
02289  *
02290  * This routine works only in 3D.
02291  * Input: numpar: for the packing routine, the array index to start with.
02292  */
02293 static void shearingbox_ox1_particle(Grid *pG, Domain *pD, long numpar)
02294 {
02295 #ifdef MPI_PARALLEL
02296   /* amount of shear, whole (yshear) and fractional (yshift) */
02297   Real yshear, yshift;
02298   Real x2len;                   /* length in x2 (y) direction of a grid */
02299   int id1s, id1r, id2s, id2r;   /* processor ids to send and receive data */
02300   int ind1, ind2, ind3;         /* x1,x2,x3 indices of grid in the domain */
02301   long n1, n2;                  /* number of packed particles */
02302   long cnt_recv;                /* received number of particles */
02303   int err;
02304   MPI_Request rq;
02305   MPI_Status stat;
02306 #endif /* MPI_PARALLEL */
02307 
02308 #ifndef MPI_PARALLEL
02309   packing_ox1_particle_shear(pG, 0, numpar);
02310 #else
02311 /*--------------------- MPI case: Step 1. Find locations ---------------------*/
02312 
02313   /* compute the distance the computational domain has sheared in y */
02314   yshear = vshear*pG->time;
02315   yshift = fmod(yshear, Lx2);
02316   x2len = pG->dx2*pG->Nx2;
02317 
02318   /* obtain processor ids to send and receive */
02319   ind1 = pD->NGrid_x1-1;        ind3 = my_kproc;
02320   ind2 = my_jproc - (int)(yshift/x2len) - 1;
02321   if (ind2 < 0) ind2 += pD->NGrid_x2;
02322   if (ind2 < 0) ind2 += pD->NGrid_x2;
02323   id1s = pD->GridArray[ind3][ind2][ind1].id;    /* for region I (send to) */
02324 
02325   ind2 = my_jproc - (int)(yshift/x2len);
02326   if (ind2 < 0) ind2 += pD->NGrid_x2;
02327   id2s = pD->GridArray[ind3][ind2][ind1].id;    /* for region II (send to) */
02328 
02329   ind2 = my_jproc + (int)(yshift/x2len) + 1;
02330   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02331   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02332   id1r = pD->GridArray[ind3][ind2][ind1].id;  /* for region I (receive from) */
02333 
02334   ind2 = my_jproc + (int)(yshift/x2len);
02335   if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
02336   id2r = pD->GridArray[ind3][ind2][ind1].id;  /* for region II (receive from) */
02337 
02338 /*--------------MPI case: Step 2. Exchange particles -------------------------*/
02339 
02340   /* packing particles at region I */
02341   n1 = packing_ox1_particle_shear(pG, 1, numpar);
02342 
02343 /*-------- sub-step 1: outer region I with inner region I (id1) --------*/
02344   /* send and receive buffer size to/from inner region I (id1) */
02345   /* Post a non-blocking receive for the data size from inner region I */
02346   err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id1r,
02347                              boundary_particle_tag, MPI_COMM_WORLD, &rq);
02348   if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02349 
02350   /* send buffer size to inner region I */
02351   err = MPI_Send(&n1, 1, MPI_LONG, id1s, boundary_particle_tag, MPI_COMM_WORLD);
02352   if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
02353 
02354   /* receive buffer size from the inner region I */
02355   err = MPI_Wait(&rq, &stat);
02356   if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
02357 
02358   /* check space for the receive buffer */
02359   if ((cnt_recv+1) >= recv_bufsize)
02360     realloc_recvbuf();
02361 
02362   /* send and receive buffer to/from inner region I (id1) */
02363   if (cnt_recv > 0) {
02364     /* Post a non-blocking receive for the data from inner region I */
02365     err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id1r,
02366                               boundary_particle_tag, MPI_COMM_WORLD, &rq);
02367     if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02368   }
02369 
02370   if (n1 > 0) {
02371     /* send buffer to inner region I */
02372     err = MPI_Send(send_buf, n1*NVAR_P, MPI_DOUBLE, id1s,
02373                              boundary_particle_tag, MPI_COMM_WORLD);
02374     if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
02375   }
02376 
02377   /* packing particles at region II (N.B.!) */
02378   n2 = packing_ox1_particle_shear(pG, 2, numpar);
02379   if (cnt_recv > 0) {
02380     /* receive buffer inner region I */
02381     err = MPI_Wait(&rq, &stat);
02382     if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
02383 
02384     /* unpack the received particle */
02385     unpack_particle(pG, recv_buf, cnt_recv);
02386   }
02387 
02388 /*-------- sub-step 2: outer region II with inner region II (id2)  --------*/
02389 
02390   /* send and receive buffer size to/from inner region II (id2) */
02391   /* Post a non-blocking receive for the data size from inner region II */
02392   err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id2r,
02393                              boundary_particle_tag, MPI_COMM_WORLD, &rq);
02394   if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02395 
02396   /* send buffer size to inner region II */
02397   err = MPI_Send(&n2, 1, MPI_LONG, id2s, boundary_particle_tag, MPI_COMM_WORLD);
02398   if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
02399 
02400   /* receive buffer size from inner region II */
02401   err = MPI_Wait(&rq, &stat);
02402   if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
02403 
02404   /* check space for the receive buffer */
02405   if ((cnt_recv+1) >= recv_bufsize)
02406     realloc_recvbuf();
02407 
02408   /* send and receive buffer to/from inner region II (id2) */
02409   if (cnt_recv > 0) {
02410     /* Post a non-blocking receive for the data from inner region II */
02411     err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id2r,
02412                               boundary_particle_tag, MPI_COMM_WORLD, &rq);
02413     if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
02414   }
02415 
02416   if (n2 > 0) {
02417     /* send buffer to inner region II */
02418     err = MPI_Send(send_buf, n2*NVAR_P, MPI_DOUBLE, id2s,
02419                              boundary_particle_tag, MPI_COMM_WORLD);
02420     if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
02421   }
02422 
02423   if (cnt_recv > 0) {
02424     /* receive buffer from inner region II */
02425     err = MPI_Wait(&rq, &stat);
02426     if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);
02427 
02428     /* unpack the received particle */
02429     unpack_particle(pG, recv_buf, cnt_recv);
02430   }
02431 
02432 #endif /* MPI_PARALLEL */
02433 
02434   return;
02435 }
02436 
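/* Note on regions I and II in the two routines above: the accumulated shear
 * yshift is in general not a multiple of the per-grid y extent x2len, so the
 * sheared image of this grid's y range overlaps two neighbouring grids in y.
 * packing_?x1_particle_shear() splits the boundary particles at the
 * demarcation coordinate x2c into region I (exchanged with the farther
 * neighbour) and region II (exchanged with the nearer one), and each region
 * gets its own count exchange followed by a data exchange.
 */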
02437 /*----------------------------------------------------------------------------*/
02438 /*! \fn static long packing_ix1_particle_shear(Grid *pG, int reg, long numpar)
02439  *  \brief Packing the particle inside the inner x1 boundary for shearing box
02440  *
02441  * Input: pG: grid; reg: region, 1 or 2 for mpi case, 0 for non-mpi case
02442  *        numpar: array index to start with.
02443  * Return: number of packed particles in the specified region.
02444  * Note: this routine applies only to 3D
02445  */
02446 static long packing_ix1_particle_shear(Grid *pG, int reg, long numpar)
02447 {
02448   Grain *cur;           /* current pointer */
02449   long n, p;
02450   Real ix1b;            /* coordinate limit in x1 inner boundary */
02451   /* amount of shear, whole (yshear) and fractional (yshift) */
02452   Real yshear, yshift;
02453   /* x2c: y-coordinate marking the demarcation of the two regions */
02454   Real x20, x2c;
02455   double *pd;
02456 
02457 /*---------------- Step.1 -----------------------*/
02458   /* get the distance of shear */
02459   yshear = vshear*pG->time;
02460   yshift = fmod(yshear, Lx2);
02461   x20 = pG->x2_0+(pG->je+pG->jdisp+1)*pG->dx2;
02462   x2c = x20 - fmod(yshear, pG->dx2*pG->Nx2);
02463 
02464   /* get coordinate limits for particles to be packed*/
02465   ix1b = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
02466 
02467   /* buffer assignment */
02468   pd = send_buf;
02469   n = 0;
02470 
02471 /*---------------- Step.2 -----------------------*/
02472   /* loop over all particle to pack particles in the boundary */
02473   p = numpar;
02474   while (p<pG->nparticle) {
02475     cur = &(pG->particle[p]);
02476     p += 1;
02477     if ((cur->pos == 21) || ( (cur->pos == 0) && (cur->x1 < ix1b)))
02478     { /* crossing particle or ghost particle from ox1 */
02479 
02480       if (((reg == 1) && (cur->x2 >= x2c)) || ((reg == 2) && (cur->x2 < x2c)))
02481       {         /* region I */                      /* region II */
02482         /* apply the shift */
02483         cur->x2 = x2min + fmod(cur->x2 - x2min + yshift, Lx2);
02484 
02485         /* pack the particle */
02486         packing_one_particle(cur, n, cur->pos);
02487         n += 1;
02488 
02489         /* delete the particle */
02490         pG->nparticle -= 1;
02491         pG->grproperty[cur->property].num -= 1;
02492         p -= 1;
02493         pG->particle[p] = pG->particle[pG->nparticle];
02494       }
02495 
02496       if (reg == 0) /* non-mpi case, directly shift the particle positions */
02497         cur->x2 = x2min + fmod(cur->x2 - x2min + yshift, Lx2);
02498     }
02499   }
02500 
02501   return n;
02502 }
02503 
02504 /*----------------------------------------------------------------------------*/
02505 /*! \fn static long packing_ox1_particle_shear(Grid *pG, int reg, long numpar)
02506  *  \brief Packing the particle outside the outer x1 boundary for shearing box
02507  *
02508  * Input: pG: grid; reg: region, 1 or 2 for mpi case, 0 for non-mpi case
02509  *        numpar: array index to start with.
02510  * Return: number of packed particles in the specified region.
02511  * Note: this routine applies only to 3D
02512  */
02513 static long packing_ox1_particle_shear(Grid *pG, int reg, long numpar)
02514 {
02515   Grain *cur;           /* current pointer */
02516   long n, p;
02517   Real ox1b;            /* coordinate limit of x1 outer boundary */
02518   /* amount of shear, whole (yshear) and fractional (yshift) */
02519   Real yshear, yshift;
02520   /* x2c: y-coordinate marking the demarcation of the two regions */
02521   Real x20, x2c;
02522   double *pd;
02523 
02524 /*---------------- Step.1 -----------------------*/
02525   /* get the distance of shear */
02526   yshear = vshear*pG->time;
02527   yshift = fmod(yshear, Lx2);
02528   x20 = pG->x2_0+(pG->js+pG->jdisp)*pG->dx2;
02529   x2c = x20 + fmod(yshear, pG->dx2*pG->Nx2);
02530 
02531   /* get coordinate limits for particles to be packed*/
02532   ox1b = pG->x1_0 + (pG->ie + 1 + pG->idisp)*pG->dx1;
02533 
02534   /* buffer assignment */
02535   pd = send_buf;
02536   n = 0;
02537 
02538 /*---------------- Step.2 -----------------------*/
02539   /* loop over all particle to pack particles in the boundary */
02540   p = numpar;
02541   while (p<pG->nparticle) {
02542     cur = &(pG->particle[p]);
02543     p += 1;
02544     if ((cur->pos == 11) || ( (cur->pos == 0) && (cur->x1 >= ox1b)))
02545     { /* crossing particle or ghost particle from ix1 */
02546 
02547       if (((reg == 1) && (cur->x2 < x2c)) || ((reg == 2) && (cur->x2 >= x2c)))
02548       {         /* region I */                      /* region II */
02549 
02550         /* apply the shift */
02551         cur->x2 = x2min + fmod(cur->x2 - x2min + Lx2 - yshift, Lx2);
02552 
02553         /* pack the particle */
02554         packing_one_particle(cur, n, cur->pos);
02555         n += 1;
02556 
02557         /* delete the particle */
02558         pG->nparticle -= 1;
02559         pG->grproperty[cur->property].num -= 1;
02560         p -= 1;
02561         pG->particle[p] = pG->particle[pG->nparticle];
02562       }
02563       if (reg == 0) /* non-mpi case, directly shift the particle positions */
02564         cur->x2 = x2min + fmod(cur->x2 - x2min + Lx2 - yshift, Lx2);
02565     }
02566   }
02567 
02568   return n;
02569 }
02570 
02571 #ifdef FARGO
02572 /*----------------------------------------------------------------------------*/
02573 /*! \fn static long packing_particle_fargo(Grid *pG, Real yl, Real yu)
02574  *  \brief Packing the particle for FARGO
02575  *
02576  * Input: pG: grid; 
02577  *        yl, yu: lower and upper limit of x2 (y) coordinate for particles to
02578  *                be packed
02579  * Return: number of packed particles in the specified region.
02580  */
02581 static long packing_particle_fargo(Grid *pG, Real yl, Real yu)
02582 {
02583   Grain *cur;
02584   long p, n;
02585   double *pd;
02586 
02587   p = 0;
02588   n = 0;
02589   pd = send_buf;
02590   while (p<pG->nparticle) {
02591     cur = &(pG->particle[p]);
02592     p += 1;
02593     if ((cur->x2 >= yl) && (cur->x2 < yu))
02594     { /* pack the particle as it is */
02595       packing_one_particle(cur, n, cur->pos);
02596       n += 1;
02597 
02598       /* delete the particle */
02599       pG->nparticle -= 1;
02600       pG->grproperty[cur->property].num -= 1;
02601       p -= 1;
02602       pG->particle[p] = pG->particle[pG->nparticle];
02603     }
02604   }
02605 
02606   return n;
02607 }
02608 
02609 /*----------------------------------------------------------------------------*/
02610 /*! \fn static int gridshift(Real shift)
02611  *  \brief Compute the integer grid shift in the y direction for FARGO */
02612 static int gridshift(Real shift)
02613 {
02614   if (shift>0)
02615     return (int)(shift)+1;
02616   else if (shift<0)
02617     return (int)(shift)-1;
02618   else return 0;
02619 }
02620 
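/* Examples, directly from the definition above: gridshift(2.4) = 3,
 * gridshift(-0.7) = -1, gridshift(0.0) = 0.  Positive shifts return
 * (int)(shift)+1 and negative shifts (int)(shift)-1, so the returned integer
 * always covers at least the full sheared distance.
 */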
02621 #endif /* FARGO */
02622 
02623 #endif /* SHEARING_BOX */
02624 
02625 #undef NVAR_P
02626 
02627 #endif /*PARTICLES*/
