
particles/feedback.c

00001 #include "../copyright.h"
00002 /*============================================================================*/
00003 /*! \file feedback.c
00004  *  \brief Exchange particle feedback between boundary cells.
00005  *
00006  * PURPOSE: Exchange particle feedback between boundary cells. The procedure
00007  *   is the opposite of setting boundary conditions: extra feedback forces are
00008  *   exerted on cells outside the grid boundary, and the feedback exchange
00009  *   adds these forces to the proper cells inside the grid boundary, according
00010  *   to the various boundary conditions.
00011  *
00012  *   Currently, FARGO must be enabled for 3D shearing-box simulations.
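 *
 *   For example, with periodic boundary conditions in x1, feedback deposited
 *   into an inner ghost cell i (is-NGF <= i < is) is added to the active cell
 *   i + Nx1 next to the outer boundary (see periodic_ox1_feedback() below),
 *   mirroring the boundary-condition fill that copies cell i + Nx1 into
 *   ghost cell i.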
00013  *
00014  * CONTAINS PUBLIC FUNCTIONS:
00015  * - exchange_feedback()
00016  * - exchange_feedback_init()
00017  * - exchange_feedback_fun()
00018  * - exchange_feedback_destruct()
00019  *
00020  * PRIVATE FUNCTIONS:
00021  * - reflect_???_feedback()
00022  * - outflow_feedback()
00023  * - periodic_???_feedback()
00024  * - send_???_feedback()
00025  * - recv_???_feedback()
00026  * where ???=[ix1,ox1,ix2,ox2,ix3,ox3]
00027  * 
00028  * History:
00029  * - Written by Xuening Bai, Apr. 2009                                        */
00030 /*============================================================================*/
00031 #include <stdio.h>
00032 #include <stdlib.h>
00033 #include <math.h>
00034 #include "../defs.h"
00035 #include "../athena.h"
00036 #include "../prototypes.h"
00037 #include "prototypes.h"
00038 #include "particle.h"
00039 #include "../globals.h"
00040 
00041 
00042 #ifdef FEEDBACK         /* endif at the end of the file */
00043 
00044 /* number of quantities exchanged for each cell (fb1, fb2, fb3, Eloss) */
00045 #define NVAR_F 4
00046 /* number of ghost-cell layers exchanged at each boundary */
00047 #define NGF    2
00048 
00049 #ifdef MPI_PARALLEL
00050 
00051 /* MPI send and receive buffers; their size is determined dynamically near the
00052  * end of exchange_feedback_init() based on the number of zones in each grid */
00053 static double *send_buf = NULL, *recv_buf = NULL;
00054 
00055 #endif /* MPI_PARALLEL */
00056 
00057 /* processor indices in the computational domain */
00058 static int my_iproc, my_jproc, my_kproc;
00059 /* grid index limit for feedback exchange */
00060 static int il,iu, jl,ju, kl,ku;
00061 /* min and max coordinate limits of the computational domain */
00062 static Real x1min,x1max,x2min,x2max,x3min,x3max;
00063 /* domain size in x1, x2, x3 direction */
00064 static Real Lx1, Lx2, Lx3;
00065 
00066 /* boundary condition function pointers, local to this file */
00067 static VBCFun_t apply_ix1 = NULL, apply_ox1 = NULL;
00068 static VBCFun_t apply_ix2 = NULL, apply_ox2 = NULL;
00069 static VBCFun_t apply_ix3 = NULL, apply_ox3 = NULL;
00070 
00071 /*====================== PROTOTYPE OF PRIVATE FUNCTIONS ======================*/
00072 /*----------------------------------------------------------------------------*/
00073 
00074 /*==============================================================================
00075  * PRIVATE FUNCTION PROTOTYPES:
00076  *   reflect_???_feedback()  - apply reflecting BCs to the feedback at boundary ???
00077  *   outflow_feedback()      - apply outflow BCs to the feedback (a no-op)
00078  *   periodic_???_feedback() - apply periodic BCs to the feedback at boundary ???
00079  *   send_???_feedback()     - MPI send of feedback data at ??? boundary
00080  *   recv_???_feedback()     - MPI receive of feedback data at ??? boundary
00081  *============================================================================*/
00082 
00083 static void reflect_ix1_feedback(Grid *pG);
00084 static void reflect_ox1_feedback(Grid *pG);
00085 static void reflect_ix2_feedback(Grid *pG);
00086 static void reflect_ox2_feedback(Grid *pG);
00087 static void reflect_ix3_feedback(Grid *pG);
00088 static void reflect_ox3_feedback(Grid *pG);
00089 
00090 static void outflow_feedback(Grid *pG);
00091 
00092 static void periodic_ix1_feedback(Grid *pG);
00093 static void periodic_ox1_feedback(Grid *pG);
00094 static void periodic_ix2_feedback(Grid *pG);
00095 static void periodic_ox2_feedback(Grid *pG);
00096 static void periodic_ix3_feedback(Grid *pG);
00097 static void periodic_ox3_feedback(Grid *pG);
00098 
00099 #ifdef MPI_PARALLEL
00100 static void send_ix1_feedback(Grid *pG);
00101 static void send_ox1_feedback(Grid *pG);
00102 static void send_ix2_feedback(Grid *pG);
00103 static void send_ox2_feedback(Grid *pG);
00104 static void send_ix3_feedback(Grid *pG);
00105 static void send_ox3_feedback(Grid *pG);
00106 
00107 static void recv_ix1_feedback(Grid *pG, MPI_Request *prq);
00108 static void recv_ox1_feedback(Grid *pG, MPI_Request *prq);
00109 static void recv_ix2_feedback(Grid *pG, MPI_Request *prq);
00110 static void recv_ox2_feedback(Grid *pG, MPI_Request *prq);
00111 static void recv_ix3_feedback(Grid *pG, MPI_Request *prq);
00112 static void recv_ox3_feedback(Grid *pG, MPI_Request *prq);
00113 #endif /* MPI_PARALLEL */
00114 
00115 #ifdef SHEARING_BOX
00116 static void shearingbox_ix1_feedback(Grid *pG, Domain *pD);
00117 static void shearingbox_ox1_feedback(Grid *pG, Domain *pD);
00118 #endif /* SHEARING_BOX */
00119 
00120 
00121 /*=========================== PUBLIC FUNCTIONS ===============================*/
00122 /*----------------------------------------------------------------------------*/
00123 /*! \fn void exchange_feedback(Grid *pG, Domain *pD)
00124  *  \brief Calls the appropriate functions to fold feedback in the ghost
00125  *    zones back into the active grid.
00126  *
00127  *    The function pointers (*apply_???) are set during initialization by
00128  *    exchange_feedback_init() to either a user-defined function or one of the
00129  *    functions corresponding to reflecting, periodic, or outflow boundaries.
00130  *    If the left- or right-neighbor Grid ID is >= 0 (a neighboring grid
00131  *    exists), MPI calls are used at that boundary instead.
00132  *
00133  *  The exchange must always proceed in the order x3-x2-x1 so that the corner
00134  *  cells are filled properly (opposite to the order used to set the MHD B.C.!)
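 *
 *  For example, feedback deposited in the corner ghost cell
 *  (ie+NGF, je+NGF, ke+NGF) is first folded in x3 into an active-k cell (the
 *  x3 loops cover il..iu and jl..ju, i.e. the x1/x2 ghost zones as well), then
 *  folded in x2 into an active-j cell, and finally folded in x1 into a fully
 *  active cell; reversing the order would lose such corner contributions.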
00135  */
00136 
00137 void exchange_feedback(Grid *pG, Domain *pD)
00138 {
00139 #ifdef MPI_PARALLEL
00140   int cnt1, cnt2, cnt3, cnt, err;
00141   MPI_Request rq;
00142 #endif /* MPI_PARALLEL */
00143 
00144 /*--- Step 1. ------------------------------------------------------------------
00145  * Feedback exchange in x3-direction */
00146 
00147   if (pG->Nx3 > 1){
00148 
00149 #ifdef MPI_PARALLEL
00150     cnt1 = pG->Nx1 > 1 ? pG->Nx1 + 2*NGF : 1;
00151     cnt2 = pG->Nx2 > 1 ? pG->Nx2 + 2*NGF : 1;
00152     cnt = NGF*cnt1*cnt2*NVAR_F;
00153 
00154 /* MPI blocks to both left and right */
00155     if (pG->rx3_id >= 0 && pG->lx3_id >= 0) {
00156       /* Post a non-blocking receive for the input data from the left grid */
00157       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx3_id, 
00158                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00159       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00160 
00161       send_ox3_feedback(pG);       /* send R */
00162       recv_ix3_feedback(pG, &rq);  /* listen L */
00163 
00164       /* Post a non-blocking receive for the input data from the right grid */
00165       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx3_id,
00166                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00167       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00168 
00169       send_ix3_feedback(pG);       /* send L */
00170       recv_ox3_feedback(pG, &rq);  /* listen R */
00171     }
00172 
00173 /* Physical boundary on left, MPI block on right */
00174     if (pG->rx3_id >= 0 && pG->lx3_id < 0) {
00175       /* Post a non-blocking receive for the input data from the right grid */
00176       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx3_id,
00177                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00178       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00179 
00180       send_ox3_feedback(pG);       /* send R */
00181       (*apply_ix3)     (pG);
00182       recv_ox3_feedback(pG, &rq);  /* listen R */
00183     }
00184 
00185 /* MPI block on left, Physical boundary on right */
00186     if (pG->rx3_id < 0 && pG->lx3_id >= 0) {
00187       /* Post a non-blocking receive for the input data from the left grid */
00188       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx3_id,
00189                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00190       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00191 
00192       send_ix3_feedback(pG);       /* send L */
00193       (*apply_ox3)     (pG);
00194       recv_ix3_feedback(pG, &rq);  /* listen L */
00195     }
00196 #endif /* MPI_PARALLEL */
00197 
00198 /* Physical boundaries on both left and right */
00199     if (pG->rx3_id < 0 && pG->lx3_id < 0) {
00200       (*apply_ix3)(pG);
00201       (*apply_ox3)(pG);
00202     }
00203 
00204   }
00205 
00206 /*--- Step 2. ------------------------------------------------------------------
00207  * Feedback exchange in x2-direction */
00208 
00209   if (pG->Nx2 > 1){
00210 
00211 #ifdef MPI_PARALLEL
00212     cnt1 = pG->Nx1 > 1 ? pG->Nx1 + 2*NGF : 1;
00213     cnt3 = pG->Nx3 > 1 ? pG->Nx3 : 1;
00214     cnt = NGF*cnt1*cnt3*NVAR_F;
00215 
00216 /* MPI blocks to both left and right */
00217     if (pG->rx2_id >= 0 && pG->lx2_id >= 0) {
00218       /* Post a non-blocking receive for the input data from the left grid */
00219       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00220                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00221       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00222 
00223       send_ox2_feedback(pG);       /* send R */
00224       recv_ix2_feedback(pG, &rq);  /* listen L */
00225 
00226       /* Post a non-blocking receive for the input data from the right grid */
00227       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00228                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00229       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00230 
00231       send_ix2_feedback(pG);       /* send L */
00232       recv_ox2_feedback(pG, &rq);  /* listen R */
00233     }
00234 
00235 /* Physical boundary on left, MPI block on right */
00236     if (pG->rx2_id >= 0 && pG->lx2_id < 0) {
00237       /* Post a non-blocking receive for the input data from the right grid */
00238       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00239                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00240       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00241 
00242       send_ox2_feedback(pG);       /* send R */
00243       (*apply_ix2)     (pG);
00244       recv_ox2_feedback(pG, &rq);  /* listen R */
00245     }
00246 
00247 /* MPI block on left, Physical boundary on right */
00248     if (pG->rx2_id < 0 && pG->lx2_id >= 0) {
00249       /* Post a non-blocking receive for the input data from the left grid */
00250       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00251                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00252       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00253 
00254       send_ix2_feedback(pG);       /* send L */
00255       (*apply_ox2)     (pG);
00256       recv_ix2_feedback(pG, &rq);  /* listen L */
00257     }
00258 #endif /* MPI_PARALLEL */
00259 
00260 /* Physical boundaries on both left and right */
00261     if (pG->rx2_id < 0 && pG->lx2_id < 0) {
00262       (*apply_ix2)(pG);
00263       (*apply_ox2)(pG);
00264     }
00265 
00266   }
00267 
00268 /*--- Step 2 (cont.) -----------------------------------------------------------
00269  * Apply the shearing-box shift to the feedback at the x1 boundaries */
00270 #ifdef SHEARING_BOX
00271 #ifndef FARGO
00272   if (pG->Nx3 > 1) /* 3D shearing box */
00273   {
00274     if (my_iproc == 0)
00275       shearingbox_ix1_feedback(pG, pD);
00276 
00277     if (my_iproc == pD->NGrid_x1-1)
00278       shearingbox_ox1_feedback(pG, pD);
00279   }
00280 #endif /* ! FARGO */
00281 #endif /* SHEARING_BOX */
00282 
00283 /*--- Step 3. ------------------------------------------------------------------
00284  * Feedback exchange in x1-direction */
00285 
00286   if (pG->Nx1 > 1){
00287 
00288 #ifdef MPI_PARALLEL
00289     cnt2 = pG->Nx2 > 1 ? pG->Nx2 : 1;
00290     cnt3 = pG->Nx3 > 1 ? pG->Nx3 : 1;
00291     cnt = NGF*cnt2*cnt3*NVAR_F;
00292 
00293 /* MPI blocks to both left and right */
00294     if (pG->rx1_id >= 0 && pG->lx1_id >= 0) {
00295       /* Post a non-blocking receive for the input data from the left grid */
00296       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx1_id,
00297                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00298       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00299 
00300       send_ox1_feedback(pG);       /* send R */
00301       recv_ix1_feedback(pG, &rq);  /* listen L */
00302 
00303       /* Post a non-blocking receive for the input data from the right grid */
00304       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx1_id,
00305                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00306       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00307 
00308       send_ix1_feedback(pG);       /* send L */
00309       recv_ox1_feedback(pG, &rq);  /* listen R */
00310     }
00311 
00312 /* Physical boundary on left, MPI block on right */
00313     if (pG->rx1_id >= 0 && pG->lx1_id < 0) {
00314       /* Post a non-blocking receive for the input data from the right grid */
00315       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx1_id,
00316                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00317       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00318 
00319       send_ox1_feedback(pG);       /* send R */
00320       (*apply_ix1)     (pG);
00321       recv_ox1_feedback(pG, &rq);  /* listen R */
00322     }
00323 
00324 /* MPI block on left, Physical boundary on right */
00325     if (pG->rx1_id < 0 && pG->lx1_id >= 0) {
00326       /* Post a non-blocking receive for the input data from the left grid */
00327       err = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx1_id,
00328                                      boundary_cells_tag, MPI_COMM_WORLD, &rq);
00329       if(err) ath_error("[exchange_feedback]: MPI_Irecv error = %d\n",err);
00330 
00331       send_ix1_feedback(pG);       /* send L */
00332       (*apply_ox1)     (pG);
00333       recv_ix1_feedback(pG, &rq);  /* listen L */
00334     }
00335 #endif /* MPI_PARALLEL */
00336 
00337 /* Physical boundaries on both left and right */
00338     if (pG->rx1_id < 0 && pG->lx1_id < 0) {
00339       (*apply_ix1)(pG);
00340       (*apply_ox1)(pG);
00341     } 
00342 
00343   }
00344 
00345   return;
00346 
00347 }
00348 
00349 /*----------------------------------------------------------------------------*/
00350 /*! \fn void exchange_feedback_init(Grid *pG, Domain *pD)
00351  *  \brief Sets function pointers for feedback exchange during
00352  *   initialization, allocates memory for send/receive buffers with MPI
00353  */
00354 void exchange_feedback_init(Grid *pG, Domain *pD)
00355 {
00356   int ibc_x1, obc_x1; /* x1 inner and outer boundary condition flag */
00357   int ibc_x2, obc_x2; /* x2 inner and outer boundary condition flag */
00358   int ibc_x3, obc_x3; /* x3 inner and outer boundary condition flag */
00359 #ifdef MPI_PARALLEL
00360   int i,j,k;
00361   int x1cnt, x2cnt, x3cnt; /* max number of feedback cells passed in x1-, x2-, x3-dir. */
00362   int nx1t, nx2t, nx3t, size;
00363 #endif /* MPI_PARALLEL */
00364 
00365 /* set left and right grid indices */
00366   if (pG->Nx1 > 1) {
00367     il = pG->is - NGF;          iu = pG->ie + NGF;
00368   } else {
00369     il = pG->is;                iu = pG->ie;
00370   }
00371 
00372   if (pG->Nx2 > 1) {
00373     jl = pG->js - NGF;          ju = pG->je + NGF;
00374   } else {
00375     jl = pG->js;                ju = pG->je;
00376   }
00377 
00378   if (pG->Nx3 > 1) {
00379     kl = pG->ks - NGF;          ku = pG->ke + NGF;
00380   } else {
00381     kl = pG->ks;                ku = pG->ke;
00382   }
00383 
00384 /* calculate the extent of the computational domain in each direction */
00385   x1min = par_getd("grid","x1min");
00386   x1max = par_getd("grid","x1max");
00387   Lx1 = x1max - x1min;
00388 
00389   x2min = par_getd("grid","x2min");
00390   x2max = par_getd("grid","x2max");
00391   Lx2 = x2max - x2min;
00392 
00393   x3min = par_getd("grid","x3min");
00394   x3max = par_getd("grid","x3max");
00395   Lx3 = x3max - x3min;
00396 
00397   get_myGridIndex(pD, pG->my_id, &my_iproc, &my_jproc, &my_kproc);
00398 
00399 /* Set function pointers for physical boundaries in x1-direction */
00400 
00401   if(pG->Nx1 > 1) {
00402     if(apply_ix1 == NULL){
00403 
00404       ibc_x1 = par_geti("grid","ibc_x1");
00405       switch(ibc_x1){
00406 
00407       case 1: /* Reflecting */
00408         apply_ix1 = reflect_ix1_feedback;
00409         break;
00410       case 5: /* Reflecting */
00411         apply_ix1 = reflect_ix1_feedback;
00412         break;
00413 
00414       case 2: /* Outflow */
00415         apply_ix1 = outflow_feedback;
00416         break;
00417 
00418       case 4: /* Periodic */
00419         apply_ix1 = periodic_ix1_feedback;
00420         break;
00421 
00422       default:
00423         ath_perr(-1,"[exchange_feedback_init]: ibc_x1 = %d unknown\n",ibc_x1);
00424         exit(EXIT_FAILURE);
00425       }
00426     }
00427 
00428     if(apply_ox1 == NULL){
00429 
00430       obc_x1 = par_geti("grid","obc_x1");
00431       switch(obc_x1){
00432 
00433       case 1: /* Reflecting */
00434         apply_ox1 = reflect_ox1_feedback;
00435         break;
00436       case 5: /* Reflecting */
00437         apply_ox1 = reflect_ox1_feedback;
00438         break;
00439 
00440       case 2: /* Outflow */
00441         apply_ox1 = outflow_feedback;
00442         break;
00443 
00444       case 4: /* Periodic */
00445         apply_ox1 = periodic_ox1_feedback;
00446         break;
00447 
00448       default:
00449         ath_perr(-1,"[exchange_feedback_init]: obc_x1 = %d unknown\n",obc_x1);
00450         exit(EXIT_FAILURE);
00451       }
00452     }
00453   }
00454 
00455 /* Set function pointers for physical boundaries in x2-direction */
00456 
00457   if(pG->Nx2 > 1) {
00458     if(apply_ix2 == NULL){
00459 
00460       ibc_x2 = par_geti("grid","ibc_x2");
00461       switch(ibc_x2){
00462 
00463       case 1: /* Reflecting */
00464         apply_ix2 = reflect_ix2_feedback;
00465         break;
00466       case 5: /* Reflecting */
00467         apply_ix2 = reflect_ix2_feedback;
00468         break;
00469 
00470       case 2: /* Outflow */
00471         apply_ix2 = outflow_feedback;
00472         break;
00473 
00474       case 4: /* Periodic */
00475         apply_ix2 = periodic_ix2_feedback;
00476         break;
00477 
00478       default:
00479         ath_perr(-1,"[exchange_feedback_init]: ibc_x2 = %d unknown\n",ibc_x2);
00480         exit(EXIT_FAILURE);
00481       }
00482     }
00483 
00484     if(apply_ox2 == NULL){
00485 
00486       obc_x2 = par_geti("grid","obc_x2");
00487       switch(obc_x2){
00488 
00489       case 1: /* Reflecting */
00490         apply_ox2 = reflect_ox2_feedback;
00491         break;
00492       case 5: /* Reflecting */
00493         apply_ox2 = reflect_ox2_feedback;
00494         break;
00495 
00496       case 2: /* Outflow */
00497         apply_ox2 = outflow_feedback;
00498         break;
00499 
00500       case 4: /* Periodic */
00501         apply_ox2 = periodic_ox2_feedback;
00502         break;
00503 
00504       default:
00505         ath_perr(-1,"[exchange_feedback_init]: obc_x2 = %d unknown\n",obc_x2);
00506         exit(EXIT_FAILURE);
00507       }
00508     }
00509   }
00510 
00511 /* Set function pointers for physical boundaries in x3-direction */
00512 
00513   if(pG->Nx3 > 1) {
00514     if(apply_ix3 == NULL){
00515 
00516       ibc_x3 = par_geti("grid","ibc_x3");
00517       switch(ibc_x3){
00518 
00519       case 1: /* Reflecting */
00520         apply_ix3 = reflect_ix3_feedback;
00521         break;
00522       case 5: /* Reflecting */
00523         apply_ix3 = reflect_ix3_feedback;
00524         break;
00525 
00526       case 2: /* Outflow */
00527         apply_ix3 = outflow_feedback;
00528         break;
00529 
00530       case 4: /* Periodic */
00531         apply_ix3 = periodic_ix3_feedback;
00532         break;
00533 
00534       default:
00535         ath_perr(-1,"[exchange_feedback_init]: ibc_x3 = %d unknown\n",ibc_x3);
00536         exit(EXIT_FAILURE);
00537       }
00538     }
00539 
00540     if(apply_ox3 == NULL){
00541 
00542       obc_x3 = par_geti("grid","obc_x3");
00543       switch(obc_x3){
00544 
00545       case 1: /* Reflecting */
00546         apply_ox3 = reflect_ox3_feedback;
00547         break;
00548       case 5: /* Reflecting */
00549         apply_ox3 = reflect_ox3_feedback;
00550         break;
00551 
00552       case 2: /* Outflow */
00553         apply_ox3 = outflow_feedback;
00554         break;
00555 
00556       case 4: /* Periodic */
00557         apply_ox3 = periodic_ox3_feedback;
00558         break;
00559 
00560       default:
00561         ath_perr(-1,"[exchange_feedback_init]: obc_x3 = %d unknown\n",obc_x3);
00562         exit(EXIT_FAILURE);
00563       }
00564     }
00565   }
00566 
00567 #ifdef MPI_PARALLEL
00568   x1cnt = x2cnt = x3cnt = 0;
00569 
00570   for (k=0; k<(pD->NGrid_x3); k++){
00571     for (j=0; j<(pD->NGrid_x2); j++){
00572       for (i=0; i<(pD->NGrid_x1); i++){
00573         if(pD->NGrid_x3 > 1){
00574           nx1t = pD->GridArray[k][j][i].ige - pD->GridArray[k][j][i].igs + 1;
00575           if(nx1t > 1) nx1t += 2*NGF;
00576 
00577           nx2t = pD->GridArray[k][j][i].jge - pD->GridArray[k][j][i].jgs + 1;
00578           if(nx2t > 1) nx2t += 2*NGF;
00579 
00580           x3cnt = nx1t*nx2t > x3cnt ? nx1t*nx2t : x3cnt;
00581         }
00582 
00583         if(pD->NGrid_x2 > 1){
00584           nx1t = pD->GridArray[k][j][i].ige - pD->GridArray[k][j][i].igs + 1;
00585           if(nx1t > 1) nx1t += 2*NGF;
00586 
00587           nx3t = pD->GridArray[k][j][i].kge - pD->GridArray[k][j][i].kgs + 1;
00588           if(nx3t > 1) nx3t += 1;
00589 
00590           x2cnt = nx1t*nx3t > x2cnt ? nx1t*nx3t : x2cnt;
00591         }
00592 
00593         if(pD->NGrid_x1 > 1){
00594           nx2t = pD->GridArray[k][j][i].jge - pD->GridArray[k][j][i].jgs + 1;
00595           if(nx2t > 1) nx2t += 1;
00596 
00597           nx3t = pD->GridArray[k][j][i].kge - pD->GridArray[k][j][i].kgs + 1;
00598           if(nx3t > 1) nx3t += 1;
00599 
00600           x1cnt = nx2t*nx3t > x1cnt ? nx2t*nx3t : x1cnt;
00601         }
00602 
00603       }
00604     }
00605   }
00606 
00607   size = x1cnt > x2cnt ? x1cnt : x2cnt;
00608   size = x3cnt >  size ? x3cnt : size;
00609 
00610   size *= NGF; /* multiply by the number of ghost layers exchanged (the third dimension) */
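  /* Worked example (hypothetical 32^3 zones per Grid, with the domain
   * decomposed in all three directions; the numbers are illustrative only):
   *   x3cnt = (32+2*NGF)*(32+2*NGF) = 36*36 = 1296
   *   x2cnt = (32+2*NGF)*(32+1)     = 36*33 = 1188
   *   x1cnt = (32+1)*(32+1)         = 33*33 = 1089
   * so size = 1296*NGF = 2592, and each buffer below holds 2592*NVAR_F = 10368
   * doubles (about 81 KB). */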
00611 
00612   if (size > 0) {
00613     if((send_buf = (double*)malloc(size*NVAR_F*sizeof(double))) == NULL)
00614       ath_error("[exchange_feedback_init]: Failed to allocate send buffer\n");
00615 
00616     if((recv_buf = (double*)malloc(size*NVAR_F*sizeof(double))) == NULL)
00617       ath_error("[exchange_feedback_init]: Failed to allocate recv buffer\n");
00618   }
00619 #endif /* MPI_PARALLEL */
00620 
00621   return;
00622 }
00623 
00624 /*----------------------------------------------------------------------------*/
00625 /*! \fn void exchange_feedback_fun(enum Direction dir, VBCFun_t prob_bc)
00626  *  \brief Sets function pointers for user-defined feedback
00627  *   exchange in problem file
00628  */
00629 
00630 void exchange_feedback_fun(enum Direction dir, VBCFun_t prob_bc)
00631 {
00632   switch(dir){
00633   case left_x1:
00634     apply_ix1 = prob_bc;
00635     break;
00636   case right_x1:
00637     apply_ox1 = prob_bc;
00638     break;
00639   case left_x2:
00640     apply_ix2 = prob_bc;
00641     break;
00642   case right_x2:
00643     apply_ox2 = prob_bc;
00644     break;
00645   case left_x3:
00646     apply_ix3 = prob_bc;
00647     break;
00648   case right_x3:
00649     apply_ox3 = prob_bc;
00650     break;
00651   default:
00652     ath_perr(-1,"[exchange_feedback_fun]: Unknown direction = %d\n",dir);
00653     exit(EXIT_FAILURE);
00654   }
00655   return;
00656 }
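/* Usage sketch (illustrative only): the names my_feedback_ix1() and
 * problem_register_feedback() below are hypothetical and not part of Athena.
 * A problem generator can install a user-defined feedback exchange function
 * for one face as follows; exchange_feedback_init() installs a default only
 * when no function has been set, so registration may happen before or after
 * the call to exchange_feedback_init(). */
#if 0
static void my_feedback_ix1(Grid *pG)
{
  /* fold the feedback stored in the inner-x1 ghost zones of pG->Coup back
   * into the active zones, e.g. following the reflect_ix1_feedback() pattern */
  return;
}

static void problem_register_feedback(void)
{
  exchange_feedback_fun(left_x1, my_feedback_ix1);
}
#endif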
00657 
00658 /*! \fn void exchange_feedback_destruct(Grid *pG, Domain *pD)
00659  *  \brief Finalize feedback exchange */
00660 void exchange_feedback_destruct(Grid *pG, Domain *pD)
00661 {
00662   apply_ix1 = NULL;
00663   apply_ox1 = NULL;
00664   apply_ix2 = NULL;
00665   apply_ox2 = NULL;
00666   apply_ix3 = NULL;
00667   apply_ox3 = NULL;
00668 #ifdef MPI_PARALLEL
00669   free(send_buf);
00670   free(recv_buf);
00671 #endif
00672   return;
00673 }
00674 
00675 /*=========================== PRIVATE FUNCTIONS ==============================*/
00676 /* Following are the private functions:
00677  *   reflect_???_feedback()
00678  *   outflow_feedback()
00679  *   periodic_???_feedback()
00680  *   send_???_feedback()
00681  *   recv_???_feedback()
00682  * where ???=[ix1,ox1,ix2,ox2,ix3,ox3]
00683  */
00684 
00685 /*----------------------------------------------------------------------------*/
00686 /*! \fn static void reflect_ix3_feedback(Grid *pG)
00687  *  \brief REFLECTING boundary conditions, Inner x3 boundary (ibc_x3=1,5)
00688  */
00689 
00690 static void reflect_ix3_feedback(Grid *pG)
00691 {
00692   int kr;
00693   int i,j,k;
00694 
00695   for (k=nghost-NGF; k<nghost; k++) {
00696     kr = 2*nghost-k-1;
00697     for (j=jl; j<=ju; j++) {
00698       for (i=il; i<=iu; i++) {
00699         pG->Coup[kr][j][i].fb1 += pG->Coup[k][j][i].fb1;
00700         pG->Coup[kr][j][i].fb2 += pG->Coup[k][j][i].fb2;
00701         pG->Coup[kr][j][i].fb3 -= pG->Coup[k][j][i].fb3;
00702 
00703         pG->Coup[kr][j][i].Eloss += pG->Coup[k][j][i].Eloss;
00704       }
00705     }
00706   }
00707 
00708   return;
00709 }
00710 
00711 /*----------------------------------------------------------------------------*/
00712 /*! \fn static void reflect_ox3_feedback(Grid *pG)
00713  *  \brief REFLECTING boundary conditions, Outer x3 boundary (obc_x3=1,5)
00714  */
00715 
00716 static void reflect_ox3_feedback(Grid *pG)
00717 {
00718   int kr;
00719   int i,j,k;
00720 
00721   for (k=pG->ke+1; k<=pG->ke+NGF; k++) {
00722     kr = 2*pG->ke-k+1;
00723     for (j=jl; j<=ju; j++) {
00724       for (i=il; i<=iu; i++) {
00725         pG->Coup[kr][j][i].fb1 += pG->Coup[k][j][i].fb1;
00726         pG->Coup[kr][j][i].fb2 += pG->Coup[k][j][i].fb2;
00727         pG->Coup[kr][j][i].fb3 -= pG->Coup[k][j][i].fb3; /* reflection */
00728 
00729         pG->Coup[kr][j][i].Eloss += pG->Coup[k][j][i].Eloss;
00730       }
00731     }
00732   }
00733 
00734   return;
00735 }
00736 
00737 /*----------------------------------------------------------------------------*/
00738 /*! \fn static void reflect_ix2_feedback(Grid *pG)
00739  *  \brief REFLECTING boundary conditions, Inner x2 boundary (ibc_x2=1,5)
00740  */
00741 static void reflect_ix2_feedback(Grid *pG)
00742 {
00743   int jr;
00744   int i,j,k;
00745 
00746   for (k=pG->ks; k<=pG->ke; k++) {
00747     for (j=nghost-NGF; j<nghost; j++) {
00748       jr = 2*nghost-j-1;
00749       for (i=il; i<=iu; i++) {
00750         pG->Coup[k][jr][i].fb1 += pG->Coup[k][j][i].fb1;
00751         pG->Coup[k][jr][i].fb2 -= pG->Coup[k][j][i].fb2; /* reflection */
00752         pG->Coup[k][jr][i].fb3 += pG->Coup[k][j][i].fb3;
00753 
00754         pG->Coup[k][jr][i].Eloss += pG->Coup[k][j][i].Eloss;
00755       }
00756     }
00757   }
00758 
00759   return;
00760 }
00761 
00762 /*----------------------------------------------------------------------------*/
00763 /*! \fn static void reflect_ox2_feedback(Grid *pG)
00764  *  \brief REFLECTING boundary conditions, Outer x2 boundary (obc_x2=1,5)
00765  */
00766 
00767 static void reflect_ox2_feedback(Grid *pG)
00768 {
00769   int jr;
00770   int i,j,k;
00771 
00772   for (k=pG->ks; k<=pG->ke; k++) {
00773     for (j=pG->je+1; j<=pG->je+NGF; j++) {
00774       jr = 2*pG->je-j+1;
00775       for (i=il; i<=iu; i++) {
00776         pG->Coup[k][jr][i].fb1 += pG->Coup[k][j][i].fb1;
00777         pG->Coup[k][jr][i].fb2 -= pG->Coup[k][j][i].fb2; /* reflection */
00778         pG->Coup[k][jr][i].fb3 += pG->Coup[k][j][i].fb3;
00779 
00780         pG->Coup[k][jr][i].Eloss += pG->Coup[k][j][i].Eloss;
00781       }
00782     }
00783   }
00784 
00785   return;
00786 }
00787 
00788 /*----------------------------------------------------------------------------*/
00789 /*! \fn static void reflect_ix1_feedback(Grid *pG)
00790  *  \brief REFLECTING boundary conditions, Inner x1 boundary (ibc_x1=1,5)
00791  */
00792 
00793 static void reflect_ix1_feedback(Grid *pG)
00794 {
00795   int ir;
00796   int i,j,k;
00797 
00798   for (k=pG->ks; k<=pG->ke; k++) {
00799     for (j=pG->js; j<=pG->je; j++) {
00800       for (i=nghost-NGF; i<nghost; i++) {
00801         ir = 2*nghost-i-1;
00802         pG->Coup[k][j][ir].fb1 -= pG->Coup[k][j][i].fb1; /* reflection */
00803         pG->Coup[k][j][ir].fb2 += pG->Coup[k][j][i].fb2;
00804         pG->Coup[k][j][ir].fb3 += pG->Coup[k][j][i].fb3;
00805 
00806         pG->Coup[k][j][ir].Eloss += pG->Coup[k][j][i].Eloss;
00807       }
00808     }
00809   }
00810 
00811   return;
00812 }
00813 
00814 /*----------------------------------------------------------------------------*/
00815 /*! \fn static void reflect_ox1_feedback(Grid *pG)
00816  *  \brief REFLECTING boundary conditions, Outer x1 boundary (obc_x1=1,5)
00817  */
00818 
00819 static void reflect_ox1_feedback(Grid *pG)
00820 {
00821   int ir;
00822   int i,j,k;
00823 
00824   for (k=pG->ks; k<=pG->ke; k++) {
00825     for (j=pG->js; j<=pG->je; j++) {
00826       for (i=pG->ie+1; i<=pG->ie+NGF; i++) {
00827         ir = 2*pG->ie-i+1;
00828         pG->Coup[k][j][ir].fb1 -= pG->Coup[k][j][i].fb1; /* reflection */
00829         pG->Coup[k][j][ir].fb2 += pG->Coup[k][j][i].fb2;
00830         pG->Coup[k][j][ir].fb3 += pG->Coup[k][j][i].fb3;
00831 
00832         pG->Coup[k][j][ir].Eloss += pG->Coup[k][j][i].Eloss;
00833       }
00834     }
00835   }
00836 
00837   return;
00838 }
00839 
00840 /*----------------------------------------------------------------------------*/
00841 /*! \fn static void outflow_feedback(Grid *pG)
00842  *  \brief OUTFLOW boundary conditions (ibc=2); nothing needs to be done
00843  */
00844 
00845 static void outflow_feedback(Grid *pG)
00846 {
00847   return;
00848 }
00849 
00850 /*----------------------------------------------------------------------------*/
00851 /*! \fn static void periodic_ix3_feedback(Grid *pG)
00852  *  \brief PERIODIC boundary conditions, Inner x3 boundary (ibc_x3=4)
00853  */
00854 static void periodic_ix3_feedback(Grid *pG)
00855 {
00856   int dk = pG->ke - pG->ks + 1;
00857   int i,j,k;
00858 
00859   for (k=nghost; k<nghost+NGF; k++) {
00860     for (j=jl; j<=ju; j++) {
00861       for (i=il; i<=iu; i++) {
00862         pG->Coup[k][j][i].fb1 += pG->Coup[k+dk][j][i].fb1;
00863         pG->Coup[k][j][i].fb2 += pG->Coup[k+dk][j][i].fb2;
00864         pG->Coup[k][j][i].fb3 += pG->Coup[k+dk][j][i].fb3;
00865 
00866         pG->Coup[k][j][i].Eloss += pG->Coup[k+dk][j][i].Eloss;
00867       }
00868     }
00869   }
00870 
00871   return;
00872 }
00873 
00874 /*----------------------------------------------------------------------------*/
00875 /*! \fn static void periodic_ox3_feedback(Grid *pG) 
00876  *  \brief PERIODIC boundary conditions, Outer x3 boundary (obc_x3=4)
00877  */
00878 
00879 static void periodic_ox3_feedback(Grid *pG)
00880 {
00881   int dk = pG->ke - pG->ks + 1;
00882   int i,j,k;
00883 
00884   for (k=nghost-NGF; k<nghost; k++) {
00885     for (j=jl; j<=ju; j++) {
00886       for (i=il; i<=iu; i++) {
00887         pG->Coup[k+dk][j][i].fb1 += pG->Coup[k][j][i].fb1;
00888         pG->Coup[k+dk][j][i].fb2 += pG->Coup[k][j][i].fb2;
00889         pG->Coup[k+dk][j][i].fb3 += pG->Coup[k][j][i].fb3;
00890 
00891         pG->Coup[k+dk][j][i].Eloss += pG->Coup[k][j][i].Eloss;
00892       }
00893     }
00894   }
00895 
00896   return;
00897 }
00898 
00899 /*----------------------------------------------------------------------------*/
00900 /*! \fn static void periodic_ix2_feedback(Grid *pG)
00901  *  \brief PERIODIC boundary conditions, Inner x2 boundary (ibc_x2=4)
00902  */
00903 
00904 static void periodic_ix2_feedback(Grid *pG)
00905 {
00906   int dj = pG->je - pG->js + 1;
00907   int i,j,k;
00908 
00909   for (k=pG->ks; k<=pG->ke; k++) {
00910     for (j=nghost; j<nghost+NGF; j++) {
00911       for (i=il; i<=iu; i++) {
00912         pG->Coup[k][j][i].fb1 += pG->Coup[k][j+dj][i].fb1;
00913         pG->Coup[k][j][i].fb2 += pG->Coup[k][j+dj][i].fb2;
00914         pG->Coup[k][j][i].fb3 += pG->Coup[k][j+dj][i].fb3;
00915 
00916         pG->Coup[k][j][i].Eloss += pG->Coup[k][j+dj][i].Eloss;
00917       }
00918     }
00919   }
00920 
00921   return;
00922 }
00923 
00924 /*----------------------------------------------------------------------------*/
00925 /*! \fn static void periodic_ox2_feedback(Grid *pG)
00926  *  \brief PERIODIC boundary conditions,Outer x2 boundary (obc_x2=4)
00927  */
00928 
00929 static void periodic_ox2_feedback(Grid *pG)
00930 {
00931   int dj = pG->je - pG->js + 1;
00932   int i,j,k;
00933 
00934   for (k=pG->ks; k<=pG->ke; k++) {
00935     for (j=nghost-NGF; j<nghost; j++) {
00936       for (i=il; i<=iu; i++) {
00937         pG->Coup[k][j+dj][i].fb1 += pG->Coup[k][j][i].fb1;
00938         pG->Coup[k][j+dj][i].fb2 += pG->Coup[k][j][i].fb2;
00939         pG->Coup[k][j+dj][i].fb3 += pG->Coup[k][j][i].fb3;
00940 
00941         pG->Coup[k][j+dj][i].Eloss += pG->Coup[k][j][i].Eloss;
00942       }
00943     }
00944   }
00945 
00946   return;
00947 }
00948 
00949 /*----------------------------------------------------------------------------*/
00950 /*! \fn static void periodic_ix1_feedback(Grid *pG)
00951  *  \brief PERIODIC boundary conditions, Inner x1 boundary (ibc_x1=4)
00952  */
00953 
00954 static void periodic_ix1_feedback(Grid *pG)
00955 {
00956   int di = pG->ie - pG->is + 1;
00957   int i,j,k;
00958 
00959   for (k=pG->ks; k<=pG->ke; k++) {
00960     for (j=pG->js; j<=pG->je; j++) {
00961       for (i=nghost; i<nghost+NGF; i++) {
00962         pG->Coup[k][j][i].fb1 += pG->Coup[k][j][i+di].fb1;
00963         pG->Coup[k][j][i].fb2 += pG->Coup[k][j][i+di].fb2;
00964         pG->Coup[k][j][i].fb3 += pG->Coup[k][j][i+di].fb3;
00965 
00966         pG->Coup[k][j][i].Eloss += pG->Coup[k][j][i+di].Eloss;
00967       }
00968     }
00969   }
00970 
00971   return;
00972 }
00973 
00974 /*----------------------------------------------------------------------------*/
00975 /*! \fn static void periodic_ox1_feedback(Grid *pG)
00976  *  \brief PERIODIC boundary conditions, Outer x1 boundary (obc_x1=4)
00977  */
00978 
00979 static void periodic_ox1_feedback(Grid *pG)
00980 {
00981   int di = pG->ie - pG->is + 1;
00982   int i,j,k;
00983 
00984   for (k=pG->ks; k<=pG->ke; k++) {
00985     for (j=pG->js; j<=pG->je; j++) {
00986       for (i=nghost-NGF; i<nghost; i++) {
00987         pG->Coup[k][j][i+di].fb1 += pG->Coup[k][j][i].fb1;
00988         pG->Coup[k][j][i+di].fb2 += pG->Coup[k][j][i].fb2;
00989         pG->Coup[k][j][i+di].fb3 += pG->Coup[k][j][i].fb3;
00990 
00991         pG->Coup[k][j][i+di].Eloss += pG->Coup[k][j][i].Eloss;
00992       }
00993     }
00994   }
00995 
00996   return;
00997 }
00998 
00999 #ifdef MPI_PARALLEL  /* This ifdef wraps the next 12 functions (~400 lines) */
01000 
01001 /*----------------------------------------------------------------------------*/
01002 /*! \fn static void send_ix3_feedback(Grid *pG)
01003  *  \brief MPI_SEND of boundary conditions, Inner x3 boundary -- send left
01004  */
01005 
01006 static void send_ix3_feedback(Grid *pG)
01007 {
01008   int i,j,k,cnt,err;
01009   GPCouple *pq;
01010   double *pd = send_buf;
01011 
01012 /* Pack feedback data into send buffer */
01013   cnt = (iu-il+1)*(ju-jl+1)*NGF*NVAR_F;
01014 
01015   for (k=nghost-NGF; k<nghost; k++) {
01016     for (j=jl; j<=ju; j++) {
01017       for (i=il; i<=iu; i++) {
01018 
01019         pq = &(pG->Coup[k][j][i]);
01020 
01021         *(pd++) = pq->fb1;
01022         *(pd++) = pq->fb2;
01023         *(pd++) = pq->fb3;
01024 
01025         *(pd++) = pq->Eloss;
01026       }
01027     }
01028   }
01029 
01030 /* send contents of buffer to the neighboring grid on L-x3 */
01031   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx3_id, 
01032                                 boundary_cells_tag, MPI_COMM_WORLD);
01033   if(err) ath_error("[send_ix3_feedback]: MPI_Send error = %d\n",err);
01034 
01035   return;
01036 }
01037 
01038 /*----------------------------------------------------------------------------*/
01039 /*! \fn static void send_ox3_feedback(Grid *pG)
01040  *  \brief MPI_SEND of boundary conditions, Outer x3 boundary -- send right
01041  */
01042 
01043 static void send_ox3_feedback(Grid *pG)
01044 {
01045   int i,j,k,cnt,err;
01046   GPCouple *pq;
01047   double *pd = send_buf;
01048 
01049 /* Pack feedback data into send buffer */
01050   cnt = (iu-il+1)*(ju-jl+1)*NGF*NVAR_F;
01051 
01052   for (k=pG->ke+1; k<=pG->ke+NGF; k++) {
01053     for (j=jl; j<=ju; j++) {
01054       for (i=il; i<=iu; i++) {
01055 
01056         pq = &(pG->Coup[k][j][i]);
01057 
01058         *(pd++) = pq->fb1;
01059         *(pd++) = pq->fb2;
01060         *(pd++) = pq->fb3;
01061 
01062         *(pd++) = pq->Eloss;
01063       }
01064     }
01065   }
01066 
01067 /* send contents of buffer to the neighboring grid on R-x3 */
01068   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx3_id,
01069                                 boundary_cells_tag, MPI_COMM_WORLD);
01070   if(err) ath_error("[send_ox3_feedback]: MPI_Send error = %d\n",err);
01071 
01072   return;
01073 }
01074 
01075 /*----------------------------------------------------------------------------*/
01076 /*! \fn static void send_ix2_feedback(Grid *pG)
01077  *  \brief MPI_SEND of boundary conditions, Inner x2 boundary -- send left
01078  */
01079 
01080 static void send_ix2_feedback(Grid *pG)
01081 {
01082   int i,j,k,cnt,err;
01083   GPCouple *pq;
01084   double *pd = send_buf;
01085 
01086 /* Pack feedback data into send buffer */
01087   cnt = (iu-il+1)*(pG->ke-pG->ks+1)*NGF*NVAR_F;
01088 
01089   for (k=pG->ks; k<=pG->ke; k++) {
01090     for (j=nghost-NGF; j<nghost; j++) {
01091       for (i=il; i<=iu; i++) {
01092 
01093         pq = &(pG->Coup[k][j][i]);
01094 
01095         *(pd++) = pq->fb1;
01096         *(pd++) = pq->fb2;
01097         *(pd++) = pq->fb3;
01098 
01099         *(pd++) = pq->Eloss;
01100       }
01101     }
01102   }
01103 
01104 /* send contents of buffer to the neighboring grid on L-x2 */
01105   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01106                                 boundary_cells_tag, MPI_COMM_WORLD);
01107   if(err) ath_error("[send_ix2_feedback]: MPI_Send error = %d\n",err);
01108 
01109   return;
01110 }
01111 
01112 /*----------------------------------------------------------------------------*/
01113 /*! \fn static void send_ox2_feedback(Grid *pG)
01114  *  \brief MPI_SEND of boundary conditions, Outer x2 boundary -- send right
01115  */
01116 
01117 static void send_ox2_feedback(Grid *pG)
01118 {
01119   int i,j,k,cnt,err;
01120   GPCouple *pq;
01121   double *pd = send_buf;
01122 
01123 /* Pack feedback data into send buffer */
01124   cnt = (iu-il+1)*(pG->ke-pG->ks+1)*NGF*NVAR_F;
01125 
01126   for (k=pG->ks; k<=pG->ke; k++) {
01127     for (j=pG->je+1; j<=pG->je+NGF; j++) {
01128       for (i=il; i<=iu; i++) {
01129 
01130         pq = &(pG->Coup[k][j][i]);
01131 
01132         *(pd++) = pq->fb1;
01133         *(pd++) = pq->fb2;
01134         *(pd++) = pq->fb3;
01135 
01136         *(pd++) = pq->Eloss;
01137       }
01138     }
01139   }
01140 
01141 /* send contents of buffer to the neighboring grid on R-x2 */
01142   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01143                                 boundary_cells_tag, MPI_COMM_WORLD);
01144   if(err) ath_error("[send_ox2_feedback]: MPI_Send error = %d\n",err);
01145 
01146   return;
01147 }
01148 
01149 /*----------------------------------------------------------------------------*/
01150 /*! \fn static void send_ix1_feedback(Grid *pG)
01151  *  \brief MPI_SEND of boundary conditions, Inner x1 boundary -- send left
01152  */
01153 
01154 static void send_ix1_feedback(Grid *pG)
01155 {
01156   int i,j,k,cnt,err;
01157   GPCouple *pq;
01158   double *pd = send_buf;
01159 
01160 /* Pack feedback data into send buffer */
01161   cnt = (pG->je-pG->js+1)*(pG->ke-pG->ks+1)*NGF*NVAR_F;
01162 
01163   for (k=pG->ks; k<=pG->ke; k++) {
01164     for (j=pG->js; j<=pG->je; j++) {
01165       for (i=nghost-NGF; i<nghost; i++) {
01166 
01167         pq = &(pG->Coup[k][j][i]);
01168 
01169         *(pd++) = pq->fb1;
01170         *(pd++) = pq->fb2;
01171         *(pd++) = pq->fb3;
01172 
01173         *(pd++) = pq->Eloss;
01174       }
01175     }
01176   }
01177 
01178 /* send contents of buffer to the neighboring grid on L-x1 */
01179   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx1_id,
01180                                 boundary_cells_tag, MPI_COMM_WORLD);
01181   if(err) ath_error("[send_ix1_feedback]: MPI_Send error = %d\n",err);
01182 
01183   return;
01184 }
01185 
01186 /*----------------------------------------------------------------------------*/
01187 /*! \fn static void send_ox1_feedback(Grid *pG)
01188  *  \brief MPI_SEND of boundary conditions, Outer x1 boundary -- send right
01189  */
01190 
01191 static void send_ox1_feedback(Grid *pG)
01192 {
01193   int i,j,k,cnt,err;
01194   GPCouple *pq;
01195   double *pd = send_buf;
01196 
01197 /* Pack feedback data into send buffer */
01198   cnt = (pG->je-pG->js+1)*(pG->ke-pG->ks+1)*NGF*NVAR_F;
01199 
01200   for (k=pG->ks; k<=pG->ke; k++) {
01201     for (j=pG->js; j<=pG->je; j++) {
01202       for (i=pG->ie+1; i<=pG->ie+NGF; i++) {
01203 
01204         pq = &(pG->Coup[k][j][i]);
01205 
01206         *(pd++) = pq->fb1;
01207         *(pd++) = pq->fb2;
01208         *(pd++) = pq->fb3;
01209 
01210         *(pd++) = pq->Eloss;
01211       }
01212     }
01213   }
01214 
01215 /* send contents of buffer to the neighboring grid on R-x1 */
01216   err = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx1_id,
01217                                 boundary_cells_tag, MPI_COMM_WORLD);
01218   if(err) ath_error("[send_ox1_feedback]: MPI_Send error = %d\n",err);
01219 
01220   return;
01221 }
01222 
01223 /*----------------------------------------------------------------------------*/
01224 /*! \fn static void recv_ix3_feedback(Grid *pG, MPI_Request *prq)
01225  *  \brief MPI_RECEIVE of boundary conditions, Inner x3 boundary -- listen left
01226  */
01227 
01228 static void recv_ix3_feedback(Grid *pG, MPI_Request *prq)
01229 {
01230   int i,j,k,err;
01231   GPCouple *pq;
01232   MPI_Status stat;
01233   double *pd = recv_buf;
01234 
01235 /* Wait to receive the input data from the left grid */
01236   err = MPI_Wait(prq, &stat);
01237   if(err) ath_error("[receive_ix3_feedback]: MPI_Wait error = %d\n",err);
01238 
01239 /* Manually unpack the data from the receive buffer */
01240   for (k=pG->ks; k<pG->ks+NGF; k++){
01241     for (j=jl; j<=ju; j++){
01242       for (i=il; i<=iu; i++){
01243         /* Get a pointer to the grid-particle coupling cell */
01244         pq = &(pG->Coup[k][j][i]);
01245 
01246         pq->fb1 += *(pd++);
01247         pq->fb2 += *(pd++);
01248         pq->fb3 += *(pd++);
01249 
01250         pq->Eloss += *(pd++);
01251       }
01252     }
01253   }
01254 
01255   return;
01256 }
01257 
01258 /*----------------------------------------------------------------------------*/
01259 /*! \fn static void recv_ox3_feedback(Grid *pG, MPI_Request *prq)
01260  *  \brief MPI_RECEIVE of boundary conditions, Outer x3 boundary -- listen right
01261  */
01262 
01263 static void recv_ox3_feedback(Grid *pG, MPI_Request *prq)
01264 {
01265   int i,j,k,err;
01266   GPCouple *pq;
01267   MPI_Status stat;
01268   double *pd = recv_buf;
01269 
01270 /* Wait to receive the input data from the right grid */
01271   err = MPI_Wait(prq, &stat);
01272   if(err) ath_error("[receive_ox3_feedback]: MPI_Wait error = %d\n",err);
01273 
01274 /* Manually unpack the data from the receive buffer */
01275   for (k=pG->ke-NGF+1; k<=pG->ke; k++){
01276     for (j=jl; j<=ju; j++){
01277       for (i=il; i<=iu; i++){
01278         /* Get a pointer to the grid-particle coupling cell */
01279         pq = &(pG->Coup[k][j][i]);
01280 
01281         pq->fb1 += *(pd++);
01282         pq->fb2 += *(pd++);
01283         pq->fb3 += *(pd++);
01284 
01285         pq->Eloss += *(pd++);
01286       }
01287     }
01288   }
01289 
01290   return;
01291 }
01292 
01293 /*----------------------------------------------------------------------------*/
01294 /*! \fn static void recv_ix2_feedback(Grid *pG, MPI_Request *prq)
01295  *  \brief MPI_RECEIVE of boundary conditions, Inner x2 boundary -- listen left
01296  */
01297 
01298 static void recv_ix2_feedback(Grid *pG, MPI_Request *prq)
01299 {
01300   int i,j,k,err;
01301   GPCouple *pq;
01302   MPI_Status stat;
01303   double *pd = recv_buf;
01304 
01305 /* Wait to receive the input data from the left grid */
01306   err = MPI_Wait(prq, &stat);
01307   if(err) ath_error("[receive_ix2_feedback]: MPI_Wait error = %d\n",err);
01308 
01309 /* Manually unpack the data from the receive buffer */
01310   for (k=pG->ks; k<=pG->ke; k++){
01311     for (j=pG->js; j<pG->js+NGF; j++){
01312       for (i=il; i<=iu; i++){
01313         /* Get a pointer to the grid-particle coupling cell */
01314         pq = &(pG->Coup[k][j][i]);
01315 
01316         pq->fb1 += *(pd++);
01317         pq->fb2 += *(pd++);
01318         pq->fb3 += *(pd++);
01319 
01320         pq->Eloss += *(pd++);
01321       }
01322     }
01323   }
01324 
01325   return;
01326 }
01327 
01328 /*----------------------------------------------------------------------------*/
01329 /*! \fn static void recv_ox2_feedback(Grid *pG, MPI_Request *prq)
01330  *  \brief MPI_RECEIVE of boundary conditions, Outer x2 boundary -- listen right
01331  */
01332 
01333 static void recv_ox2_feedback(Grid *pG, MPI_Request *prq)
01334 {
01335   int i,j,k,err;
01336   GPCouple *pq;
01337   MPI_Status stat;
01338   double *pd = recv_buf;
01339 
01340 /* Wait to receive the input data from the right grid */
01341   err = MPI_Wait(prq, &stat);
01342   if(err) ath_error("[receive_ox2_feedback]: MPI_Wait error = %d\n",err);
01343 
01344 /* Manually unpack the data from the receive buffer */
01345   for (k=pG->ks; k<=pG->ke; k++){
01346     for (j=pG->je-NGF+1; j<=pG->je; j++){
01347       for (i=il; i<=iu; i++){
01348         /* Get a pointer to the grid-particle coupling cell */
01349         pq = &(pG->Coup[k][j][i]);
01350 
01351         pq->fb1 += *(pd++);
01352         pq->fb2 += *(pd++);
01353         pq->fb3 += *(pd++);
01354 
01355         pq->Eloss += *(pd++);
01356       }
01357     }
01358   }
01359 
01360   return;
01361 }
01362 
01363 /*----------------------------------------------------------------------------*/
01364 /*! \fn static void recv_ix1_feedback(Grid *pG, MPI_Request *prq)
01365  *  \brief MPI_RECEIVE of boundary conditions, Inner x1 boundary -- listen left
01366  */
01367 
01368 static void recv_ix1_feedback(Grid *pG, MPI_Request *prq)
01369 {
01370   int i,j,k,err;
01371   GPCouple *pq;
01372   MPI_Status stat;
01373   double *pd = recv_buf;
01374 
01375 /* Wait to receive the input data from the left grid */
01376   err = MPI_Wait(prq, &stat);
01377   if(err) ath_error("[receive_ix1_feedback]: MPI_Wait error = %d\n",err);
01378 
01379 /* Manually unpack the data from the receive buffer */
01380   for (k=pG->ks; k<=pG->ke; k++){
01381     for (j=pG->js; j<=pG->je; j++){
01382       for (i=pG->is; i<pG->is+NGF; i++){
01383         /* Get a pointer to the grid-particle coupling cell */
01384         pq = &(pG->Coup[k][j][i]);
01385 
01386         pq->fb1 += *(pd++);
01387         pq->fb2 += *(pd++);
01388         pq->fb3 += *(pd++);
01389 
01390         pq->Eloss += *(pd++);
01391       }
01392     }
01393   }
01394 
01395   return;
01396 }
01397 
01398 /*----------------------------------------------------------------------------*/
01399 /*! \fn static void recv_ox1_feedback(Grid *pG, MPI_Request *prq)
01400  *  \brief MPI_RECEIVE of boundary conditions, Outer x1 boundary -- listen right
01401  */
01402 
01403 static void recv_ox1_feedback(Grid *pG, MPI_Request *prq)
01404 {
01405   int i,j,k,err;
01406   GPCouple *pq;
01407   MPI_Status stat;
01408   double *pd = recv_buf;
01409 
01410 /* Wait to receive the input data from the right grid */
01411   err = MPI_Wait(prq, &stat);
01412   if(err) ath_error("[receive_ox1_feedback]: MPI_Wait error = %d\n",err);
01413 
01414 /* Manually unpack the data from the receive buffer */
01415   for (k=pG->ks; k<=pG->ke; k++){
01416     for (j=pG->js; j<=pG->je; j++){
01417       for (i=pG->ie-NGF+1; i<=pG->ie; i++){
01418         /* Get a pointer to the grid-particle coupling cell */
01419         pq = &(pG->Coup[k][j][i]);
01420 
01421         pq->fb1 += *(pd++);
01422         pq->fb2 += *(pd++);
01423         pq->fb3 += *(pd++);
01424 
01425         pq->Eloss += *(pd++);
01426       }
01427     }
01428   }
01429 
01430   return;
01431 }
01432 
01433 #endif /* MPI_PARALLEL */
01434 
01435 #ifdef SHEARING_BOX
01436 
01437 /*----------------------------------------------------------------------------*/
01438 /*! \fn static void shearingbox_ix1_feedback(Grid *pG, Domain *pD)
01439  *  \brief Exchange feedback for 3D shearing box, Inner x1.
01440  *
01441  * Note that here we only need to shear the feedback array by an integer number
01442  * of cells.
01443  */
01444 static void shearingbox_ix1_feedback(Grid *pG, Domain *pD)
01445 {
01446   return;  /* empty stub: FARGO must be enabled for the 3D shearing box (see file header) */
01447 }
01448 
01449 /*----------------------------------------------------------------------------*/
01450 /*! \fn static void shearingbox_ox1_feedback(Grid *pG, Domain *pD)
01451  *  \brief Exchange feedback for 3D shearing box, Outer x1.
01452  *
01453  * Note that here we only need to shear the feedback array by an integer number
01454  * of cells.
01455  */
01456 static void shearingbox_ox1_feedback(Grid *pG, Domain *pD)
01457 {
01458   return;  /* empty stub: FARGO must be enabled for the 3D shearing box (see file header) */
01459 }
01460 
01461 #endif /* SHEARING_BOX */
01462 
01463 #endif /* FEEDBACK */
