#include "../copyright.h"
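/*============================================================================*/
/*! \file bvals_grav.c
 *  \brief Sets boundary conditions (quantities in ghost zones) for the
 *   gravitational potential Phi on each edge of a Grid.
 *
 *   Only reflecting and periodic conditions are implemented here; a
 *   user-defined condition can be enrolled with bvals_grav_fun().
 *
 * CONTAINS PUBLIC FUNCTIONS:
 * - bvals_grav()      - calls appropriate functions to set ghost cells
 * - bvals_grav_init() - sets function pointers used by bvals_grav()
 * - bvals_grav_fun()  - enrolls a pointer to a user-defined BC function    */
/*============================================================================*/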
#include <stdio.h>
#include <stdlib.h>
#include "../defs.h"
#include "../athena.h"
#include "prototypes.h"
#include "../prototypes.h"

#ifdef SELF_GRAVITY

#ifdef MPI_PARALLEL
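/* MPI send and receive buffers, and non-blocking request handles */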
static double **send_buf = NULL, **recv_buf = NULL;
static MPI_Request *recv_rq, *send_rq;
#endif
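
/* boundary condition function pointers, local to this file; set in
 * bvals_grav_init(), or overridden by bvals_grav_fun() */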
static VGFun_t ix1_GBCFun = NULL, ox1_GBCFun = NULL;
static VGFun_t ix2_GBCFun = NULL, ox2_GBCFun = NULL;
static VGFun_t ix3_GBCFun = NULL, ox3_GBCFun = NULL;
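
/*==============================================================================
 * PRIVATE FUNCTION PROTOTYPES:
 *   reflect_Phi_???()  - apply reflecting BCs at boundary ???
 *   periodic_Phi_???() - apply periodic BCs at boundary ???
 *   pack_Phi_???()     - pack Phi into MPI send buffer at ??? boundary
 *   unpack_Phi_???()   - unpack Phi from MPI recv buffer at ??? boundary
 *============================================================================*/
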
static void reflect_Phi_ix1(GridS *pG);
static void reflect_Phi_ox1(GridS *pG);
static void reflect_Phi_ix2(GridS *pG);
static void reflect_Phi_ox2(GridS *pG);
static void reflect_Phi_ix3(GridS *pG);
static void reflect_Phi_ox3(GridS *pG);

static void periodic_Phi_ix1(GridS *pG);
static void periodic_Phi_ox1(GridS *pG);
static void periodic_Phi_ix2(GridS *pG);
static void periodic_Phi_ox2(GridS *pG);
static void periodic_Phi_ix3(GridS *pG);
static void periodic_Phi_ox3(GridS *pG);

static void ProlongateLater(GridS *pG);

#ifdef MPI_PARALLEL
static void pack_Phi_ix1(GridS *pG);
static void pack_Phi_ox1(GridS *pG);
static void pack_Phi_ix2(GridS *pG);
static void pack_Phi_ox2(GridS *pG);
static void pack_Phi_ix3(GridS *pG);
static void pack_Phi_ox3(GridS *pG);

static void unpack_Phi_ix1(GridS *pG);
static void unpack_Phi_ox1(GridS *pG);
static void unpack_Phi_ix2(GridS *pG);
static void unpack_Phi_ox2(GridS *pG);
static void unpack_Phi_ix3(GridS *pG);
static void unpack_Phi_ox3(GridS *pG);
#endif /* MPI_PARALLEL */
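
/*=========================== PUBLIC FUNCTIONS ===============================*/

/*----------------------------------------------------------------------------*/
/*! \fn void bvals_grav(DomainS *pD)
 *  \brief Calls appropriate functions to set ghost zones of Phi.
 *
 *   The function pointers (*_GBCFun) are set during initialization by
 *   bvals_grav_init(); with MPI, ghost zones at internal Grid boundaries are
 *   filled by non-blocking sends/receives of Phi between neighboring Grids. */
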
void bvals_grav(DomainS *pD)
{
  GridS *pGrid = (pD->Grid);
#ifdef SHEARING_BOX
  int myL,myM,myN;
#endif
#ifdef MPI_PARALLEL
  int cnt1, cnt2, cnt3, cnt, ierr, mIndex;
#endif
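
/*--- Step 1. Boundary Conditions in x1-direction ----------------------------*/
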
  if (pGrid->Nx[0] > 1){

#ifdef MPI_PARALLEL
    cnt2 = pGrid->Nx[1] > 1 ? pGrid->Nx[1] + 1 : 1;
    cnt3 = pGrid->Nx[2] > 1 ? pGrid->Nx[2] + 1 : 1;
    cnt = nghost*cnt2*cnt3;

/* MPI blocks to both left and right */
    if (pGrid->rx1_id >= 0 && pGrid->lx1_id >= 0) {

      /* Post non-blocking receives for data from L and R Grids */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data L and R */
      pack_Phi_ix1(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      pack_Phi_ox1(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* check non-blocking sends have completed */
      ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);

      /* check non-blocking receives and unpack data in any order */
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix1(pGrid);
      if (mIndex == 1) unpack_Phi_ox1(pGrid);
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix1(pGrid);
      if (mIndex == 1) unpack_Phi_ox1(pGrid);
    }

/* Physical boundary on left, MPI block on right */
    if (pGrid->rx1_id >= 0 && pGrid->lx1_id < 0) {

      /* Post non-blocking receive for data from R Grid */
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data R */
      pack_Phi_ox1(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx1_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* set physical boundary */
      (*(ix1_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from R and unpack data */
      ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
      unpack_Phi_ox1(pGrid);
    }

/* MPI block on left, Physical boundary on right */
    if (pGrid->rx1_id < 0 && pGrid->lx1_id >= 0) {

      /* Post non-blocking receive for data from L Grid */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));

      /* pack and send data L */
      pack_Phi_ix1(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx1_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      /* set physical boundary */
      (*(ox1_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from L and unpack data */
      ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
      unpack_Phi_ix1(pGrid);
    }
#endif /* MPI_PARALLEL */

/* Physical boundaries on both left and right */
    if (pGrid->rx1_id < 0 && pGrid->lx1_id < 0) {
      (*(ix1_GBCFun))(pGrid);
      (*(ox1_GBCFun))(pGrid);
    }

  }
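
/*--- Step 2. Boundary Conditions in x2-direction ----------------------------*/
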
  if (pGrid->Nx[1] > 1){

#ifdef MPI_PARALLEL
    cnt1 = pGrid->Nx[0] > 1 ? pGrid->Nx[0] + 2*nghost : 1;
    cnt3 = pGrid->Nx[2] > 1 ? pGrid->Nx[2] + 1 : 1;
    cnt = nghost*cnt1*cnt3;

/* MPI blocks to both left and right */
    if (pGrid->rx2_id >= 0 && pGrid->lx2_id >= 0) {

      /* Post non-blocking receives for data from L and R Grids */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data L and R */
      pack_Phi_ix2(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      pack_Phi_ox2(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* check non-blocking sends have completed */
      ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);

      /* check non-blocking receives and unpack data in any order */
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix2(pGrid);
      if (mIndex == 1) unpack_Phi_ox2(pGrid);
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix2(pGrid);
      if (mIndex == 1) unpack_Phi_ox2(pGrid);
    }

/* Physical boundary on left, MPI block on right */
    if (pGrid->rx2_id >= 0 && pGrid->lx2_id < 0) {

      /* Post non-blocking receive for data from R Grid */
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data R */
      pack_Phi_ox2(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx2_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* set physical boundary */
      (*(ix2_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from R and unpack data */
      ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
      unpack_Phi_ox2(pGrid);
    }

/* MPI block on left, Physical boundary on right */
    if (pGrid->rx2_id < 0 && pGrid->lx2_id >= 0) {

      /* Post non-blocking receive for data from L Grid */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));

      /* pack and send data L */
      pack_Phi_ix2(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx2_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      /* set physical boundary */
      (*(ox2_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from L and unpack data */
      ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
      unpack_Phi_ix2(pGrid);
    }
#endif /* MPI_PARALLEL */

/* Physical boundaries on both left and right */
    if (pGrid->rx2_id < 0 && pGrid->lx2_id < 0) {
      (*(ix2_GBCFun))(pGrid);
      (*(ox2_GBCFun))(pGrid);
    }

/* shearing-sheet BCs at the x1 edges of the Domain */
#ifdef SHEARING_BOX
    get_myGridIndex(pD, myID_Comm_world, &myL, &myM, &myN);
    if (myL == 0) {
      ShearingSheet_grav_ix1(pGrid, pD);
    }
    if (myL == (pD->NGrid[0]-1)) {
      ShearingSheet_grav_ox1(pGrid, pD);
    }
#endif

  }
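
/*--- Step 3. Boundary Conditions in x3-direction ----------------------------*/
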
  if (pGrid->Nx[2] > 1){

#ifdef MPI_PARALLEL
    cnt1 = pGrid->Nx[0] > 1 ? pGrid->Nx[0] + 2*nghost : 1;
    cnt2 = pGrid->Nx[1] > 1 ? pGrid->Nx[1] + 2*nghost : 1;
    cnt = nghost*cnt1*cnt2;

/* MPI blocks to both left and right */
    if (pGrid->rx3_id >= 0 && pGrid->lx3_id >= 0) {

      /* Post non-blocking receives for data from L and R Grids */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data L and R */
      pack_Phi_ix3(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      pack_Phi_ox3(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* check non-blocking sends have completed */
      ierr = MPI_Waitall(2, send_rq, MPI_STATUS_IGNORE);

      /* check non-blocking receives and unpack data in any order */
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix3(pGrid);
      if (mIndex == 1) unpack_Phi_ox3(pGrid);
      ierr = MPI_Waitany(2,recv_rq,&mIndex,MPI_STATUS_IGNORE);
      if (mIndex == 0) unpack_Phi_ix3(pGrid);
      if (mIndex == 1) unpack_Phi_ox3(pGrid);
    }

/* Physical boundary on left, MPI block on right */
    if (pGrid->rx3_id >= 0 && pGrid->lx3_id < 0) {

      /* Post non-blocking receive for data from R Grid */
      ierr = MPI_Irecv(recv_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, RtoL_tag,
                       pD->Comm_Domain, &(recv_rq[1]));

      /* pack and send data R */
      pack_Phi_ox3(pGrid);
      ierr = MPI_Isend(send_buf[1], cnt, MPI_DOUBLE, pGrid->rx3_id, LtoR_tag,
                       pD->Comm_Domain, &(send_rq[1]));

      /* set physical boundary */
      (*(ix3_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[1]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from R and unpack data */
      ierr = MPI_Wait(&(recv_rq[1]), MPI_STATUS_IGNORE);
      unpack_Phi_ox3(pGrid);
    }

/* MPI block on left, Physical boundary on right */
    if (pGrid->rx3_id < 0 && pGrid->lx3_id >= 0) {

      /* Post non-blocking receive for data from L Grid */
      ierr = MPI_Irecv(recv_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, LtoR_tag,
                       pD->Comm_Domain, &(recv_rq[0]));

      /* pack and send data L */
      pack_Phi_ix3(pGrid);
      ierr = MPI_Isend(send_buf[0], cnt, MPI_DOUBLE, pGrid->lx3_id, RtoL_tag,
                       pD->Comm_Domain, &(send_rq[0]));

      /* set physical boundary */
      (*(ox3_GBCFun))(pGrid);

      /* check non-blocking send has completed */
      ierr = MPI_Wait(&(send_rq[0]), MPI_STATUS_IGNORE);

      /* wait on non-blocking receive from L and unpack data */
      ierr = MPI_Wait(&(recv_rq[0]), MPI_STATUS_IGNORE);
      unpack_Phi_ix3(pGrid);
    }
#endif /* MPI_PARALLEL */

/* Physical boundaries on both left and right */
    if (pGrid->rx3_id < 0 && pGrid->lx3_id < 0) {
      (*(ix3_GBCFun))(pGrid);
      (*(ox3_GBCFun))(pGrid);
    }

  }

  return;
}
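
/*----------------------------------------------------------------------------*/
/*! \fn void bvals_grav_init(MeshS *pM)
 *  \brief Sets function pointers for physical boundaries during
 *   initialization, and allocates MPI send/receive buffers sized for the
 *   largest Grid face in each Domain. */
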
void bvals_grav_init(MeshS *pM)
{
  GridS *pG;
  DomainS *pD;
  int i,nl,nd,irefine;
#ifdef MPI_PARALLEL
  int myL,myM,myN,l,m,n,nx1t,nx2t,nx3t,size;
  int x1cnt=0, x2cnt=0, x3cnt=0;
#endif

/* Cycle through all the Domains that have active Grids on this processor */

  for (nl=0; nl<(pM->NLevels); nl++){
  for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
  if (pM->Domain[nl][nd].Grid != NULL) {
    pD = (DomainS*)&(pM->Domain[nl][nd]);
    pG = pM->Domain[nl][nd].Grid;
    irefine = 1;
    for (i=1;i<=nl;i++) irefine *= 2;  /* refinement factor = 2^level */
#ifdef MPI_PARALLEL
/* get (l,m,n) coordinates of Grid being updated on this processor */
    get_myGridIndex(pD, myID_Comm_world, &myL, &myM, &myN);
#endif
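
/* Set function pointers for physical boundaries in x1-direction */
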
    if(pG->Nx[0] > 1) {

/*---- ix1 boundary ----------------------------------------------------------*/

      if(ix1_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if(pD->Disp[0] != 0) {
          ix1_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix1){

          case 1: /* Reflecting */
            ix1_GBCFun = reflect_Phi_ix1;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix1 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ix1_GBCFun = periodic_Phi_ix1;
#ifdef MPI_PARALLEL
            if(pG->lx1_id < 0 && pD->NGrid[0] > 1){
              pG->lx1_id = pD->GData[myN][myM][pD->NGrid[0]-1].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ix1_GBCFun = reflect_Phi_ix1;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix1 = %d unknown\n",
            pM->BCFlag_ix1);
          }
        }
      }

/*---- ox1 boundary ----------------------------------------------------------*/

      if(ox1_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if((pD->Disp[0] + pD->Nx[0])/irefine != pM->Nx[0]) {
          ox1_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox1){

          case 1: /* Reflecting */
            ox1_GBCFun = reflect_Phi_ox1;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox1 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ox1_GBCFun = periodic_Phi_ox1;
#ifdef MPI_PARALLEL
            if(pG->rx1_id < 0 && pD->NGrid[0] > 1){
              pG->rx1_id = pD->GData[myN][myM][0].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ox1_GBCFun = reflect_Phi_ox1;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox1 = %d unknown\n",
            pM->BCFlag_ox1);
          }
        }
      }
    }
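
/* Set function pointers for physical boundaries in x2-direction */
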
    if(pG->Nx[1] > 1) {

/*---- ix2 boundary ----------------------------------------------------------*/

      if(ix2_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if(pD->Disp[1] != 0) {
          ix2_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix2){

          case 1: /* Reflecting */
            ix2_GBCFun = reflect_Phi_ix2;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix2 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ix2_GBCFun = periodic_Phi_ix2;
#ifdef MPI_PARALLEL
            if(pG->lx2_id < 0 && pD->NGrid[1] > 1){
              pG->lx2_id = pD->GData[myN][pD->NGrid[1]-1][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ix2_GBCFun = reflect_Phi_ix2;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix2 = %d unknown\n",
            pM->BCFlag_ix2);
          }
        }
      }

/*---- ox2 boundary ----------------------------------------------------------*/

      if(ox2_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if((pD->Disp[1] + pD->Nx[1])/irefine != pM->Nx[1]) {
          ox2_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox2){

          case 1: /* Reflecting */
            ox2_GBCFun = reflect_Phi_ox2;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox2 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ox2_GBCFun = periodic_Phi_ox2;
#ifdef MPI_PARALLEL
            if(pG->rx2_id < 0 && pD->NGrid[1] > 1){
              pG->rx2_id = pD->GData[myN][0][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ox2_GBCFun = reflect_Phi_ox2;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox2 = %d unknown\n",
            pM->BCFlag_ox2);
          }
        }
      }
    }
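
/* Set function pointers for physical boundaries in x3-direction */
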
    if(pG->Nx[2] > 1) {

/*---- ix3 boundary ----------------------------------------------------------*/

      if(ix3_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if(pD->Disp[2] != 0) {
          ix3_GBCFun = ProlongateLater;

/* Domain is at L-edge of root Domain */
        } else {
          switch(pM->BCFlag_ix3){

          case 1: /* Reflecting */
            ix3_GBCFun = reflect_Phi_ix3;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ix3 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ix3_GBCFun = periodic_Phi_ix3;
#ifdef MPI_PARALLEL
            if(pG->lx3_id < 0 && pD->NGrid[2] > 1){
              pG->lx3_id = pD->GData[pD->NGrid[2]-1][myM][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ix3_GBCFun = reflect_Phi_ix3;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ix3 = %d unknown\n",
            pM->BCFlag_ix3);
          }
        }
      }

/*---- ox3 boundary ----------------------------------------------------------*/

      if(ox3_GBCFun == NULL){

/* Domain boundary is in interior of root: set BCs later by prolongation */
        if((pD->Disp[2] + pD->Nx[2])/irefine != pM->Nx[2]) {
          ox3_GBCFun = ProlongateLater;

/* Domain is at R-edge of root Domain */
        } else {
          switch(pM->BCFlag_ox3){

          case 1: /* Reflecting */
            ox3_GBCFun = reflect_Phi_ox3;
            break;

          case 2: /* Outflow */
            ath_error("[bvals_grav_init]: BCFlag_ox3 = 2 not implemented\n");
            break;

          case 4: /* Periodic */
            ox3_GBCFun = periodic_Phi_ox3;
#ifdef MPI_PARALLEL
            if(pG->rx3_id < 0 && pD->NGrid[2] > 1){
              pG->rx3_id = pD->GData[0][myM][myL].ID_Comm_Domain;
            }
#endif /* MPI_PARALLEL */
            break;

          case 5: /* Reflecting */
            ox3_GBCFun = reflect_Phi_ox3;
            break;

          default:
            ath_error("[bvals_grav_init]: BCFlag_ox3 = %d unknown\n",
            pM->BCFlag_ox3);
          }
        }
      }
    }
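
/* Figure out largest size needed for send/receive buffers with MPI */
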
#ifdef MPI_PARALLEL

    for (n=0; n<(pD->NGrid[2]); n++){
    for (m=0; m<(pD->NGrid[1]); m++){
    for (l=0; l<(pD->NGrid[0]); l++){

/* x1cnt is the maximum area of x1 faces that must be exchanged */
      if(pD->NGrid[0] > 1){
        nx2t = pD->GData[n][m][l].Nx[1];
        if(nx2t > 1) nx2t += 1;

        nx3t = pD->GData[n][m][l].Nx[2];
        if(nx3t > 1) nx3t += 1;

        if(nx2t*nx3t > x1cnt) x1cnt = nx2t*nx3t;
      }

/* x2cnt is the maximum area of x2 faces that must be exchanged */
      if(pD->NGrid[1] > 1){
        nx1t = pD->GData[n][m][l].Nx[0];
        if(nx1t > 1) nx1t += 2*nghost;

        nx3t = pD->GData[n][m][l].Nx[2];
        if(nx3t > 1) nx3t += 1;

        if(nx1t*nx3t > x2cnt) x2cnt = nx1t*nx3t;
      }

/* x3cnt is the maximum area of x3 faces that must be exchanged */
      if(pD->NGrid[2] > 1){
        nx1t = pD->GData[n][m][l].Nx[0];
        if(nx1t > 1) nx1t += 2*nghost;

        nx2t = pD->GData[n][m][l].Nx[1];
        if(nx2t > 1) nx2t += 2*nghost;

        if(nx1t*nx2t > x3cnt) x3cnt = nx1t*nx2t;
      }
    }}}
#endif /* MPI_PARALLEL */

  }}}

#ifdef MPI_PARALLEL
/* Allocate send/receive buffers and MPI_Request arrays */

  size = x1cnt > x2cnt ? x1cnt : x2cnt;
  size = x3cnt > size  ? x3cnt : size;

/* Multiply by the number of ghost zones exchanged across each face */
  size *= nghost;

  if (size > 0) {
    if((send_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
      ath_error("[bvals_grav_init]: Failed to allocate send buffer\n");

    if((recv_buf = (double**)calloc_2d_array(2,size,sizeof(double))) == NULL)
      ath_error("[bvals_grav_init]: Failed to allocate recv buffer\n");
  }

  if((recv_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
    ath_error("[bvals_grav_init]: Failed to allocate recv MPI_Request array\n");
  if((send_rq = (MPI_Request*) calloc_1d_array(2,sizeof(MPI_Request))) == NULL)
    ath_error("[bvals_grav_init]: Failed to allocate send MPI_Request array\n");

#endif /* MPI_PARALLEL */

  return;
}
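
/*----------------------------------------------------------------------------*/
/*! \fn void bvals_grav_fun(DomainS *pD, enum BCDirection dir, VGFun_t prob_bc)
 *  \brief Sets function pointers for user-defined BCs during initialization */
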
void bvals_grav_fun(DomainS *pD, enum BCDirection dir, VGFun_t prob_bc)
{
  switch(dir){
  case left_x1:
    ix1_GBCFun = prob_bc;
    break;
  case right_x1:
    ox1_GBCFun = prob_bc;
    break;
  case left_x2:
    ix2_GBCFun = prob_bc;
    break;
  case right_x2:
    ox2_GBCFun = prob_bc;
    break;
  case left_x3:
    ix3_GBCFun = prob_bc;
    break;
  case right_x3:
    ox3_GBCFun = prob_bc;
    break;
  default:
    ath_perr(-1,"[bvals_grav_fun]: Unknown direction = %d\n",dir);
    exit(EXIT_FAILURE);
  }
  return;
}
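
/*=========================== PRIVATE FUNCTIONS ==============================*/

/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ix1(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Inner x1 boundary */
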
static void reflect_Phi_ix1(GridS *pGrid)
{
  int is = pGrid->is;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je; j++) {
      for (i=1; i<=nghost; i++) {
        pGrid->Phi[k][j][is-i] = pGrid->Phi[k][j][is+(i-1)];
      }
    }
  }

  return;
}
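
/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ox1(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Outer x1 boundary */
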
static void reflect_Phi_ox1(GridS *pGrid)
{
  int ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je; j++) {
      for (i=1; i<=nghost; i++) {
        pGrid->Phi[k][j][ie+i] = pGrid->Phi[k][j][ie-(i-1)];
      }
    }
  }

  return;
}
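
/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ix2(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Inner x2 boundary */
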
static void reflect_Phi_ix2(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=1; j<=nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[k][js-j][i] = pGrid->Phi[k][js+(j-1)][i];
      }
    }
  }

  return;
}
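
/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ox2(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Outer x2 boundary */
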
static void reflect_Phi_ox2(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=1; j<=nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[k][je+j][i] = pGrid->Phi[k][je-(j-1)][i];
      }
    }
  }

  return;
}
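
/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ix3(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Inner x3 boundary */
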
static void reflect_Phi_ix3(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks;
  int i,j,k;

  for (k=1; k<=nghost; k++) {
    for (j=js-nghost; j<=je+nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[ks-k][j][i] = pGrid->Phi[ks+(k-1)][j][i];
      }
    }
  }

  return;
}
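
/*----------------------------------------------------------------------------*/
/*! \fn static void reflect_Phi_ox3(GridS *pGrid)
 *  \brief REFLECTING boundary conditions, Outer x3 boundary */
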
static void reflect_Phi_ox3(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ke = pGrid->ke;
  int i,j,k;

  for (k=1; k<=nghost; k++) {
    for (j=js-nghost; j<=je+nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[ke+k][j][i] = pGrid->Phi[ke-(k-1)][j][i];
      }
    }
  }

  return;
}
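
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ix1(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Inner x1 boundary */
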
static void periodic_Phi_ix1(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je; j++) {
      for (i=1; i<=nghost; i++) {
        pGrid->Phi[k][j][is-i] = pGrid->Phi[k][j][ie-(i-1)];
      }
    }
  }

  return;
}
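
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ox1(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Outer x1 boundary */
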
static void periodic_Phi_ox1(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=js; j<=je; j++) {
      for (i=1; i<=nghost; i++) {
        pGrid->Phi[k][j][ie+i] = pGrid->Phi[k][j][is+(i-1)];
      }
    }
  }

  return;
}
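
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ix2(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Inner x2 boundary */
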
static void periodic_Phi_ix2(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=1; j<=nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[k][js-j][i] = pGrid->Phi[k][je-(j-1)][i];
      }
    }
  }

  return;
}
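
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ox2(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Outer x2 boundary */
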
static void periodic_Phi_ox2(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=ks; k<=ke; k++) {
    for (j=1; j<=nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[k][je+j][i] = pGrid->Phi[k][js+(j-1)][i];
      }
    }
  }

  return;
}
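
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ix3(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Inner x3 boundary */
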
static void periodic_Phi_ix3(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=1; k<=nghost; k++) {
    for (j=js-nghost; j<=je+nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[ks-k][j][i] = pGrid->Phi[ke-(k-1)][j][i];
      }
    }
  }

  return;
}
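
/*----------------------------------------------------------------------------*/
/*! \fn static void periodic_Phi_ox3(GridS *pGrid)
 *  \brief PERIODIC boundary conditions, Outer x3 boundary */
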
static void periodic_Phi_ox3(GridS *pGrid)
{
  int is = pGrid->is, ie = pGrid->ie;
  int js = pGrid->js, je = pGrid->je;
  int ks = pGrid->ks, ke = pGrid->ke;
  int i,j,k;

  for (k=1; k<=nghost; k++) {
    for (j=js-nghost; j<=je+nghost; j++) {
      for (i=is-nghost; i<=ie+nghost; i++) {
        pGrid->Phi[ke+k][j][i] = pGrid->Phi[ks+(k-1)][j][i];
      }
    }
  }

  return;
}
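
/*----------------------------------------------------------------------------*/
/*! \fn static void ProlongateLater(GridS *pGrid)
 *  \brief PROLONGATION boundary conditions.  Nothing is done here; the ghost
 *   zones are presumably filled later by the prolongation step, so this is a
 *   no-op placeholder. */
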
static void ProlongateLater(GridS *pGrid)
{
  return;
}

#ifdef MPI_PARALLEL
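
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ix1(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Inner x1 boundary */
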
static void pack_Phi_ix1(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[0];

/* Pack active cells adjacent to inner x1 boundary into send buffer */
  for (k=ks; k<=ke; k++){
    for (j=js; j<=je; j++){
      for (i=is; i<=is+(nghost-1); i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
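
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ox1(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Outer x1 boundary */
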
static void pack_Phi_ox1(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[1];

/* Pack active cells adjacent to outer x1 boundary into send buffer */
  for (k=ks; k<=ke; k++){
    for (j=js; j<=je; j++){
      for (i=ie-(nghost-1); i<=ie; i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
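
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ix2(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Inner x2 boundary */
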
static void pack_Phi_ix2(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[0];

/* Pack active cells adjacent to inner x2 boundary into send buffer */
  for (k=ks; k<=ke; k++){
    for (j=js; j<=js+(nghost-1); j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
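
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ox2(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Outer x2 boundary */
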
static void pack_Phi_ox2(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[1];

/* Pack active cells adjacent to outer x2 boundary into send buffer */
  for (k=ks; k<=ke; k++){
    for (j=je-(nghost-1); j<=je; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
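
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ix3(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Inner x3 boundary */
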
static void pack_Phi_ix3(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[0];

/* Pack active cells adjacent to inner x3 boundary into send buffer; the
 * non-blocking send itself is posted by bvals_grav() */
  for (k=ks; k<=ks+(nghost-1); k++){
    for (j=js-nghost; j<=je+nghost; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
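
/*----------------------------------------------------------------------------*/
/*! \fn static void pack_Phi_ox3(GridS *pG)
 *  \brief PACK Phi into MPI send buffer at Outer x3 boundary */
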
static void pack_Phi_ox3(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pSnd = send_buf[1];

/* Pack active cells adjacent to outer x3 boundary into send buffer */
  for (k=ke-(nghost-1); k<=ke; k++){
    for (j=js-nghost; j<=je+nghost; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        *(pSnd++) = pG->Phi[k][j][i];
      }
    }
  }

  return;
}
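
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ix1(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Inner x1 boundary */
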
static void unpack_Phi_ix1(GridS *pG)
{
  int is = pG->is;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pRcv = recv_buf[0];

/* Unpack received data into ghost zones beyond inner x1 boundary */
  for (k=ks; k<=ke; k++){
    for (j=js; j<=je; j++){
      for (i=is-nghost; i<=is-1; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}
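
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ox1(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Outer x1 boundary */
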
static void unpack_Phi_ox1(GridS *pG)
{
  int ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pRcv = recv_buf[1];

/* Unpack received data into ghost zones beyond outer x1 boundary */
  for (k=ks; k<=ke; k++){
    for (j=js; j<=je; j++){
      for (i=ie+1; i<=ie+nghost; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}
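
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ix2(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Inner x2 boundary */
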
static void unpack_Phi_ix2(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pRcv = recv_buf[0];

/* Unpack received data into ghost zones beyond inner x2 boundary */
  for (k=ks; k<=ke; k++){
    for (j=js-nghost; j<=js-1; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}
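
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ox2(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Outer x2 boundary */
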
static void unpack_Phi_ox2(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int je = pG->je;
  int ks = pG->ks, ke = pG->ke;
  int i,j,k;
  double *pRcv = recv_buf[1];

/* Unpack received data into ghost zones beyond outer x2 boundary */
  for (k=ks; k<=ke; k++){
    for (j=je+1; j<=je+nghost; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}
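
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ix3(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Inner x3 boundary */
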
static void unpack_Phi_ix3(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ks = pG->ks;
  int i,j,k;
  double *pRcv = recv_buf[0];

/* Unpack received data into ghost zones beyond inner x3 boundary */
  for (k=ks-nghost; k<=ks-1; k++){
    for (j=js-nghost; j<=je+nghost; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}
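
/*----------------------------------------------------------------------------*/
/*! \fn static void unpack_Phi_ox3(GridS *pG)
 *  \brief UNPACK Phi from MPI receive buffer at Outer x3 boundary */
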
static void unpack_Phi_ox3(GridS *pG)
{
  int is = pG->is, ie = pG->ie;
  int js = pG->js, je = pG->je;
  int ke = pG->ke;
  int i,j,k;
  double *pRcv = recv_buf[1];

/* Unpack received data into ghost zones beyond outer x3 boundary */
  for (k=ke+1; k<=ke+nghost; k++){
    for (j=js-nghost; j<=je+nghost; j++){
      for (i=is-nghost; i<=ie+nghost; i++){
        pG->Phi[k][j][i] = *(pRcv++);
      }
    }
  }

  return;
}

#endif /* MPI_PARALLEL */

#endif /* SELF_GRAVITY */