00001 #include "copyright.h"

/*============================================================================*/
/* Shearing-box and FARGO (orbital advection) boundary-condition routines
 * (the "bvals_shear" module referred to by the error messages below).
 *
 * CONTAINS PUBLIC FUNCTIONS:
 *   ShearingSheet_ix1()    - shearing-sheet BCs at the inner x1 boundary
 *   ShearingSheet_ox1()    - shearing-sheet BCs at the outer x1 boundary
 *   RemapEy_ix1()          - remaps Ey at the inner x1 boundary (MHD only)
 *   RemapEy_ox1()          - remaps Ey at the outer x1 boundary (MHD only)
 *   Fargo()                - orbital advection of the conserved variables
 *   bvals_shear_init()     - allocates the work arrays used in this file
 *   bvals_shear_destruct() - frees the work arrays
 *
 * PRIVATE FUNCTION:
 *   RemapFlux() - second- or third-order upwind flux used for the y-remaps
 *============================================================================*/

00031 #include <float.h>
00032 #include <math.h>
00033
00034 #include <stdlib.h>
00035 #include <string.h>
00036 #include "defs.h"
00037 #include "athena.h"
00038 #include "globals.h"
00039 #include "prototypes.h"
00040
00041
00042 #if defined(SHEARING_BOX) || (defined(CYLINDRICAL) && defined(FARGO))
00043
00044
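/* NREMAP is the number of variables remapped in the ghost zones by the
 * shearing-sheet boundary conditions; NFARGO is the number advected by the
 * FARGO algorithm.  Both depend on the physics options compiled in. */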
00045 #ifdef BAROTROPIC
00046 #ifdef HYDRO
00047 enum {NREMAP = 4, NFARGO = 4};
00048 #endif
00049 #ifdef MHD
00050 enum {NREMAP = 8, NFARGO = 6};
00051 #endif
00052 #else
00053 #ifdef HYDRO
00054 enum {NREMAP = 5, NFARGO = 5};
00055 #endif
00056 #ifdef MHD
00057 enum {NREMAP = 9, NFARGO = 7};
00058 #endif
00059 #endif
00060
00061 #ifdef MHD
00062 #define NVAR_SHARE (NVAR + 3)
00063 #else
00064 #define NVAR_SHARE NVAR
00065 #endif
00066
00067
00068
00069
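/* Structure holding the conserved variables that are remapped in the x1 ghost
 * zones by the shearing-sheet boundary conditions. */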
00070 typedef struct Remap_s{
00071 Real U[NREMAP];
00072 #if (NSCALARS > 0)
00073 Real s[NSCALARS];
00074 #endif
00075 }Remap;
00076
00077
00078
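/* Structure holding the conserved variables (and, with MHD, the interface
 * fields) advected by the FARGO algorithm. */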
00079 typedef struct FCons_s{
00080 Real U[NFARGO];
00081 #if (NSCALARS > 0)
00082 Real s[NSCALARS];
00083 #endif
00084 }FConsS;
00085
00086
00087
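/* Work arrays allocated in bvals_shear_init() and freed in bvals_shear_destruct():
 *   GhstZns, GhstZnsBuf  - ghost zones of remapped conserved variables
 *   U, Flx (and Uhalf)   - 1D scratch arrays used by RemapFlux()
 *   tEyBuf               - buffer for the remapped y-EMF (MHD only)
 *   send_buf, recv_buf   - MPI send/receive buffers
 *   nfghost, FargoVars, FargoFlx - ghost-zone count, variables and fluxes
 *                          used by the FARGO algorithm */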
00088 static Remap ***GhstZns=NULL, ***GhstZnsBuf=NULL;
00089
00090 static Real *U=NULL, *Flx=NULL;
00091
00092 #ifdef MHD
00093 static Real **tEyBuf=NULL;
00094 #endif
00095
00096 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
00097 static Real *Uhalf=NULL;
00098 #endif
00099
00100 #ifdef MPI_PARALLEL
00101 static double *send_buf = NULL, *recv_buf = NULL;
00102 #endif
00103 #ifdef FARGO
00104 int nfghost;
00105 static FConsS ***FargoVars=NULL, ***FargoFlx=NULL;
00106 #endif
00107
00108
00109
00110
00111
00112
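/* prototype for the remap flux function (defined at the end of this file) */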
00113 void RemapFlux(const Real *U,const Real eps,const int ji,const int jo, Real *F);
00114
00115 #endif
00116
00117 #ifdef SHEARING_BOX
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
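/*----------------------------------------------------------------------------*/
/* ShearingSheet_ix1: shearing-sheet boundary conditions at the inner x1
 * boundary.  The x1 ghost zones are assumed to already contain the periodic
 * image of the opposite side of the box; this routine then (1) copies them
 * into GhstZns, adding the background shear to M2 (and correcting E) when
 * FARGO is not used, (2) applies a conservative fractional remap in y by
 * epsi = (deltay mod dx2)/dx2 using RemapFlux(), (3) shifts by the remaining
 * integer number of cells (directly, or via MPI when the Domain is decomposed
 * in y), (4) copies the result back into the Grid, and (5) re-applies periodic
 * (or MPI) y-boundary conditions in the ghost region.  For 2D xz runs without
 * FARGO, only the background shear is added to M3. */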
00130 void ShearingSheet_ix1(DomainS *pD)
00131 {
00132 GridS *pG = pD->Grid;
00133 int is = pG->is, ie = pG->ie;
00134 int js = pG->js, je = pG->je;
00135 int ks = pG->ks, ke = pG->ke;
00136 int i,ii,j,k,ku,n,joffset,jremap;
00137 Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epsi;
00138 #ifdef MPI_PARALLEL
00139 int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
00140 int ierr,sendto_id,getfrom_id;
00141 double *pSnd,*pRcv;
00142 Remap *pRemap;
00143 ConsS *pCons;
00144 MPI_Request rq;
00145 #endif
00146
00147
00148
00149
00150 xmin = pD->RootMinX[0];
00151 xmax = pD->RootMaxX[0];
00152 Lx = xmax - xmin;
00153
00154 xmin = pD->RootMinX[1];
00155 xmax = pD->RootMaxX[1];
00156 Ly = xmax - xmin;
00157
00158 qomL = qshear*Omega_0*Lx;
00159 yshear = qomL*pG->time;
00160
00161
00162
00163
00164 deltay = fmod(yshear, Ly);
00165
00166
00167
00168
00169
00170 joffset = (int)(deltay/pG->dx2);
00171 epsi = (fmod(deltay,pG->dx2))/pG->dx2;
00172
00173
00174
00175
00176
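/* In 3D, or in 2D with ShBoxCoord==xy: copy the x1 ghost zones into GhstZns,
 * adding the background shear velocity qomL to M2 (and correcting E) when
 * FARGO is not being used. */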
00177 if (pG->Nx[2] > 1 || ShBoxCoord==xy) {
00178
00179 if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
00180 for(k=ks; k<=ku; k++) {
00181 for(j=js-nghost; j<=je+nghost; j++){
00182 for(i=0; i<nghost; i++){
00183 ii = is-nghost+i;
00184 GhstZns[k][i][j].U[0] = pG->U[k][j][ii].d;
00185 GhstZns[k][i][j].U[1] = pG->U[k][j][ii].M1;
00186 GhstZns[k][i][j].U[2] = pG->U[k][j][ii].M2;
00187 #ifndef FARGO
00188 GhstZns[k][i][j].U[2] += qomL*pG->U[k][j][ii].d;
00189 #endif
00190 GhstZns[k][i][j].U[3] = pG->U[k][j][ii].M3;
00191 #ifdef ADIABATIC
00192
00193 GhstZns[k][i][j].U[4] = pG->U[k][j][ii].E + (0.5/GhstZns[k][i][j].U[0])*
00194 (SQR(GhstZns[k][i][j].U[2]) - SQR(pG->U[k][j][ii].M2));
00195 #endif
00196 #ifdef MHD
00197 GhstZns[k][i][j].U[NREMAP-4] = pG->U[k][j][ii].B1c;
00198 GhstZns[k][i][j].U[NREMAP-3] = pG->B1i[k][j][ii];
00199 GhstZns[k][i][j].U[NREMAP-2] = pG->B2i[k][j][ii];
00200 GhstZns[k][i][j].U[NREMAP-1] = pG->B3i[k][j][ii];
00201 #endif
00202 #if (NSCALARS > 0)
00203 for(n=0; n<NSCALARS; n++) GhstZns[k][i][j].s[n] = pG->U[k][j][ii].s[n];
00204 #endif
00205 }
00206 }
00207 }
00208
00209
00210
00211
00212
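/* Remap the data in GhstZns in y by the fractional cell offset epsi, using the
 * conservative flux from RemapFlux(); store the result in GhstZnsBuf. */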
00213 for(k=ks; k<=ku; k++) {
00214 for(i=0; i<nghost; i++){
00215
00216 for (n=0; n<(NREMAP); n++) {
00217 for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].U[n];
00218 RemapFlux(U,epsi,js,je+1,Flx);
00219 for(j=js; j<=je; j++){
00220 GhstZnsBuf[k][i][j].U[n] = GhstZns[k][i][j].U[n] - (Flx[j+1]-Flx[j]);
00221 }
00222 }
00223
00224 #if (NSCALARS > 0)
00225 for (n=0; n<(NSCALARS); n++) {
00226 for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].s[n];
00227 RemapFlux(U,epsi,js,je+1,Flx);
00228 for(j=js; j<=je; j++){
00229 GhstZnsBuf[k][i][j].s[n] = GhstZns[k][i][j].s[n] - (Flx[j+1]-Flx[j]);
00230 }
00231 }
00232 #endif
00233
00234 }
00235 }
00236
00237
00238
00239
00240
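/* Shift by the remaining integer number of cells, joffset.  With only one Grid
 * in the y-direction this is a cyclic copy within GhstZnsBuf; otherwise the
 * overlapping and non-overlapping pieces are exchanged between Grids with MPI. */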
00241 if (pD->NGrid[1] == 1) {
00242
00243 for(k=ks; k<=ku; k++) {
00244 for(j=js; j<=je; j++){
00245 jremap = j - joffset;
00246 if (jremap < (int)js) jremap += pG->Nx[1];
00247
00248 for(i=0; i<nghost; i++){
00249 for (n=0; n<(NREMAP); n++) {
00250 GhstZns[k][i][j].U[n] = GhstZnsBuf[k][i][jremap].U[n];
00251 }
00252 #if (NSCALARS > 0)
00253 for (n=0; n<NSCALARS; n++) {
00254 GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00255 }
00256 #endif
00257 }
00258
00259 }
00260 }
00261
00262 #ifdef MPI_PARALLEL
00263 } else {
00264
00265
00266
00267
00268
00269
00270 get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
00271
00272
00273
00274 Ngrids = (int)(joffset/pG->Nx[1]);
00275 joverlap = joffset - Ngrids*pG->Nx[1];
00276
00277
00278
00279
00280
00281
00282
00283 if (joverlap != 0) {
00284
00285 jproc = my_jproc + (Ngrids + 1);
00286 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00287 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00288
00289 jproc = my_jproc - (Ngrids + 1);
00290 if (jproc < 0) jproc += pD->NGrid[1];
00291 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00292
00293
00294
00295
00296 cnt = nghost*joverlap*(ku-ks+1)*(NREMAP+NSCALARS);
00297
00298 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00299 shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00300
00301 pSnd = send_buf;
00302 for (k=ks; k<=ku; k++) {
00303 for (j=je-(joverlap-1); j<=je; j++) {
00304 for(i=0; i<nghost; i++){
00305
00306 pRemap = &(GhstZnsBuf[k][i][j]);
00307
00308 for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00309 #if (NSCALARS > 0)
00310 for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00311 #endif
00312 }
00313 }
00314 }
00315 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00316 shearing_sheet_ix1_tag, pD->Comm_Domain);
00317
00318
00319
00320
00321
00322 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00323
00324 pRcv = recv_buf;
00325 for (k=ks; k<=ku; k++) {
00326 for (j=js; j<=js+(joverlap-1); j++) {
00327 for(i=0; i<nghost; i++){
00328
00329 pRemap = &(GhstZns[k][i][j]);
00330
00331 for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00332 #if (NSCALARS > 0)
00333 for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00334 #endif
00335 }
00336 }
00337 }
00338
00339 }
00340
00341
00342
00343
00344
00345
00346 if (Ngrids == 0) {
00347
00348 for(k=ks; k<=ku; k++) {
00349 for(j=js+joverlap; j<=je; j++){
00350 jremap = j-joverlap;
00351 for(i=0; i<nghost; i++){
00352 for (n=0; n<(NREMAP); n++) {
00353 GhstZns[k][i][j].U[n] = GhstZnsBuf[k][i][jremap].U[n];
00354 }
00355 #if (NSCALARS > 0)
00356 for (n=0; n<NSCALARS; n++) {
00357 GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00358 }
00359 #endif
00360 }
00361 }
00362 }
00363
00364
00365
00366
00367
00368 } else {
00369
00370
00371
00372 jproc = my_jproc + Ngrids;
00373 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00374 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00375
00376 jproc = my_jproc - Ngrids;
00377 if (jproc < 0) jproc += pD->NGrid[1];
00378 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00379
00380 cnt = nghost*(pG->Nx[1]-joverlap)*(ku-ks+1)*(NREMAP+NSCALARS);
00381
00382 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00383 shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00384
00385 pSnd = send_buf;
00386 for (k=ks; k<=ku; k++) {
00387 for (j=js; j<=je-joverlap; j++) {
00388 for(i=0; i<nghost; i++){
00389
00390 pRemap = &(GhstZnsBuf[k][i][j]);
00391 for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00392 #if (NSCALARS > 0)
00393 for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00394 #endif
00395 }
00396 }
00397 }
00398 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00399 shearing_sheet_ix1_tag, pD->Comm_Domain);
00400
00401
00402
00403
00404 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00405
00406 pRcv = recv_buf;
00407 for (k=ks; k<=ku; k++) {
00408 for (j=js+joverlap; j<=je; j++) {
00409 for(i=0; i<nghost; i++){
00410
00411 pRemap = &(GhstZns[k][i][j]);
00412 for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00413 #if (NSCALARS > 0)
00414 for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00415 #endif
00416 }
00417 }
00418 }
00419 }
00420
00421 #endif
00422 }
00423
00424
00425
00426
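/* Copy the remapped values in GhstZns back into the Grid, and compute the
 * cell-centered B2c/B3c in the ghost zones from the remapped interface fields. */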
00427 for(k=ks; k<=ke; k++) {
00428 for(j=js; j<=je; j++){
00429 for(i=0; i<nghost; i++){
00430 pG->U[k][j][is-nghost+i].d = GhstZns[k][i][j].U[0];
00431 pG->U[k][j][is-nghost+i].M1 = GhstZns[k][i][j].U[1];
00432 pG->U[k][j][is-nghost+i].M2 = GhstZns[k][i][j].U[2];
00433 pG->U[k][j][is-nghost+i].M3 = GhstZns[k][i][j].U[3];
00434 #ifdef ADIABATIC
00435 pG->U[k][j][is-nghost+i].E = GhstZns[k][i][j].U[4];
00436 #endif
00437 #ifdef MHD
00438 pG->U[k][j][is-nghost+i].B1c = GhstZns[k][i][j].U[NREMAP-4];
00439 pG->B1i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-3];
00440 pG->B2i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-2];
00441 pG->B3i[k][j][is-nghost+i] = GhstZns[k][i][j].U[NREMAP-1];
00442 #endif
00443 #if (NSCALARS > 0)
00444 for (n=0; n<NSCALARS; n++) {
00445 pG->U[k][j][is-nghost+i].s[n] = GhstZns[k][i][j].s[n];
00446 }
00447 #endif
00448 }
00449 }
00450 }
00451
00452
00453 #ifdef MHD
00454 if (pG->Nx[2] > 1) {
00455 for(j=js; j<=je; j++){
00456 for(i=0; i<nghost; i++){
00457 pG->B3i[ke+1][j][is-nghost+i] = GhstZns[ke+1][i][j].U[NREMAP-1];
00458 }
00459 }
00460 }
00461 #endif
00462
00463
00464
00465
00466
00467
00468 #ifdef MHD
00469 for (k=ks; k<=ke; k++) {
00470 for (j=js; j<=je; j++) {
00471 for(i=is-nghost; i<is; i++){
00472 pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
00473 }
00474 }
00475 }
00476 if (pG->Nx[2] > 1) {
00477 for (k=ks; k<=ke; k++) {
00478 for (j=js; j<=je; j++) {
00479 for(i=is-nghost; i<is; i++){
00480 pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
00481 }
00482 }
00483 }
00484 }
00485 #endif
00486
00487
00488
00489
00490
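/* Re-apply boundary conditions in y to the x1 ghost zones just filled:
 * periodic copies when there is a single Grid in y, MPI exchanges otherwise. */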
00491 if (pD->NGrid[1] == 1) {
00492
00493 for(k=ks; k<=ke; k++) {
00494 for(j=1; j<=nghost; j++){
00495 for(i=is-nghost; i<is; i++){
00496 pG->U[k][js-j][i] = pG->U[k][je-(j-1)][i];
00497 pG->U[k][je+j][i] = pG->U[k][js+(j-1)][i];
00498 #ifdef MHD
00499 pG->B1i[k][js-j][i] = pG->B1i[k][je-(j-1)][i];
00500 pG->B2i[k][js-j][i] = pG->B2i[k][je-(j-1)][i];
00501 pG->B3i[k][js-j][i] = pG->B3i[k][je-(j-1)][i];
00502
00503 pG->B1i[k][je+j][i] = pG->B1i[k][js+(j-1)][i];
00504 pG->B2i[k][je+j][i] = pG->B2i[k][js+(j-1)][i];
00505 pG->B3i[k][je+j][i] = pG->B3i[k][js+(j-1)][i];
00506 #endif
00507 }
00508 }
00509 }
00510 #ifdef MHD
00511 if (pG->Nx[2] > 1) {
00512 for (j=1; j<=nghost; j++) {
00513 for (i=is-nghost; i<is; i++) {
00514 pG->B3i[ke+1][js-j][i] = pG->B3i[ke+1][je-(j-1)][i];
00515 pG->B3i[ke+1][je+j][i] = pG->B3i[ke+1][js+(j-1)][i];
00516 }
00517 }
00518 }
00519 #endif
00520
00521 #ifdef MPI_PARALLEL
00522 } else {
00523
00524
00525
00526
00527
00528
00529
00530 cnt = nghost*nghost*(ku-ks+1)*NVAR_SHARE;
00531 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00532 shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00533
00534 pSnd = send_buf;
00535 for (k=ks; k<=ku; k++){
00536 for (j=je-nghost+1; j<=je; j++){
00537 for (i=is-nghost; i<is; i++){
00538
00539 pCons = &(pG->U[k][j][i]);
00540
00541 *(pSnd++) = pCons->d;
00542 *(pSnd++) = pCons->M1;
00543 *(pSnd++) = pCons->M2;
00544 *(pSnd++) = pCons->M3;
00545 #ifndef BAROTROPIC
00546 *(pSnd++) = pCons->E;
00547 #endif
00548 #ifdef MHD
00549 *(pSnd++) = pCons->B1c;
00550 *(pSnd++) = pCons->B2c;
00551 *(pSnd++) = pCons->B3c;
00552 *(pSnd++) = pG->B1i[k][j][i];
00553 *(pSnd++) = pG->B2i[k][j][i];
00554 *(pSnd++) = pG->B3i[k][j][i];
00555 #endif
00556 #if (NSCALARS > 0)
00557 for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
00558 #endif
00559 }
00560 }
00561 }
00562
00563
00564 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00565 shearing_sheet_ix1_tag, pD->Comm_Domain);
00566
00567
00568 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00569
00570 pRcv = recv_buf;
00571 for (k=ks; k<=ku; k++){
00572 for (j=js-nghost; j<=js-1; j++){
00573 for (i=is-nghost; i<is; i++){
00574
00575 pCons = &(pG->U[k][j][i]);
00576
00577 pCons->d = *(pRcv++);
00578 pCons->M1 = *(pRcv++);
00579 pCons->M2 = *(pRcv++);
00580 pCons->M3 = *(pRcv++);
00581 #ifndef BAROTROPIC
00582 pCons->E = *(pRcv++);
00583 #endif
00584 #ifdef MHD
00585 pCons->B1c = *(pRcv++);
00586 pCons->B2c = *(pRcv++);
00587 pCons->B3c = *(pRcv++);
00588 pG->B1i[k][j][i] = *(pRcv++);
00589 pG->B2i[k][j][i] = *(pRcv++);
00590 pG->B3i[k][j][i] = *(pRcv++);
00591 #endif
00592 #if (NSCALARS > 0)
00593 for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
00594 #endif
00595 }
00596 }
00597 }
00598
00599
00600 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
00601 shearing_sheet_ix1_tag, pD->Comm_Domain, &rq);
00602
00603 pSnd = send_buf;
00604 for (k=ks; k<=ku; k++){
00605 for (j=js; j<=js+nghost-1; j++){
00606 for (i=is-nghost; i<is; i++){
00607
00608 pCons = &(pG->U[k][j][i]);
00609
00610 *(pSnd++) = pCons->d;
00611 *(pSnd++) = pCons->M1;
00612 *(pSnd++) = pCons->M2;
00613 *(pSnd++) = pCons->M3;
00614 #ifndef BAROTROPIC
00615 *(pSnd++) = pCons->E;
00616 #endif
00617 #ifdef MHD
00618 *(pSnd++) = pCons->B1c;
00619 *(pSnd++) = pCons->B2c;
00620 *(pSnd++) = pCons->B3c;
00621 *(pSnd++) = pG->B1i[k][j][i];
00622 *(pSnd++) = pG->B2i[k][j][i];
00623 *(pSnd++) = pG->B3i[k][j][i];
00624 #endif
00625 #if (NSCALARS > 0)
00626 for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
00627 #endif
00628 }
00629 }
00630 }
00631
00632
00633 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
00634 shearing_sheet_ix1_tag, pD->Comm_Domain);
00635
00636
00637 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00638
00639 pRcv = recv_buf;
00640 for (k=ks; k<=ku; k++){
00641 for (j=je+1; j<=je+nghost; j++){
00642 for (i=is-nghost; i<is; i++){
00643
00644 pCons = &(pG->U[k][j][i]);
00645
00646 pCons->d = *(pRcv++);
00647 pCons->M1 = *(pRcv++);
00648 pCons->M2 = *(pRcv++);
00649 pCons->M3 = *(pRcv++);
00650 #ifndef BAROTROPIC
00651 pCons->E = *(pRcv++);
00652 #endif
00653 #ifdef MHD
00654 pCons->B1c = *(pRcv++);
00655 pCons->B2c = *(pRcv++);
00656 pCons->B3c = *(pRcv++);
00657 pG->B1i[k][j][i] = *(pRcv++);
00658 pG->B2i[k][j][i] = *(pRcv++);
00659 pG->B3i[k][j][i] = *(pRcv++);
00660 #endif
00661 #if (NSCALARS > 0)
00662 for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
00663 #endif
00664 }
00665 }
00666 }
00667 #endif
00668
00669 }
00670
00671
00672
00673
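/* Fix B2c in the corner zones at js-1 and je, which needed the y-boundary
 * values of B2i set above. */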
00674 #ifdef MHD
00675 for (k=ks; k<=ke; k++) {
00676 for(i=is-nghost; i<is; i++){
00677 pG->U[k][je ][i].B2c = 0.5*(pG->B2i[k][je+1][i]+pG->B2i[k][je][i]);
00678 pG->U[k][js-1][i].B2c = 0.5*(pG->B2i[k][js-1][i]+pG->B2i[k][js][i]);
00679 }
00680 }
00681 #endif
00682
00683 }
00684
00685
00686
00687
00688
00689
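/* For 2D xz shearing-box runs without FARGO no remap is performed; only the
 * background shear velocity is added to M3 (with E corrected) in the x1 ghost
 * zones. */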
00690 #ifndef FARGO
00691 if (pG->Nx[2] == 1 && ShBoxCoord==xz) {
00692
00693 for (j=js-nghost; j<=je+nghost; j++) {
00694 for (i=1; i<=nghost; i++) {
00695 #ifdef ADIABATIC
00696
00697 pG->U[ks][j][is-i].E += (0.5/pG->U[ks][j][is-i].d)*
00698 (SQR((pG->U[ks][j][is-i].M3 + qomL*pG->U[ks][j][is-i].d))
00699 - SQR(pG->U[ks][j][is-i].M3));
00700 #endif
00701 pG->U[ks][j][is-i].M3 += qomL*pG->U[ks][j][is-i].d;
00702 }
00703 }
00704
00705 }
00706 #endif
00707
00708 return;
00709 }
00710
00711
00712
00713
00714
00715
00716
00717
00718
00719
00720
00721
00722
00723
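/*----------------------------------------------------------------------------*/
/* ShearingSheet_ox1: shearing-sheet boundary conditions at the outer x1
 * boundary.  Identical in structure to ShearingSheet_ix1() above, except that
 * the ghost zones at ie+1..ie+nghost are remapped by -deltay (epso = -epsi)
 * and the background shear is subtracted from M2 (or from M3 in 2D xz runs)
 * rather than added. */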
00724 void ShearingSheet_ox1(DomainS *pD)
00725 {
00726 GridS *pG = pD->Grid;
00727 int is = pG->is, ie = pG->ie;
00728 int js = pG->js, je = pG->je;
00729 int ks = pG->ks, ke = pG->ke;
00730 int i,ii,j,k,ku,n,joffset,jremap;
00731 Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epso;
00732 #ifdef MPI_PARALLEL
00733 int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
00734 int ierr,sendto_id,getfrom_id;
00735 double *pSnd,*pRcv;
00736 Remap *pRemap;
00737 ConsS *pCons;
00738 MPI_Request rq;
00739 #endif
00740
00741
00742
00743
00744 xmin = pD->RootMinX[0];
00745 xmax = pD->RootMaxX[0];
00746 Lx = xmax - xmin;
00747
00748 xmin = pD->RootMinX[1];
00749 xmax = pD->RootMaxX[1];
00750 Ly = xmax - xmin;
00751
00752 qomL = qshear*Omega_0*Lx;
00753 yshear = qomL*pG->time;
00754
00755
00756
00757
00758 deltay = fmod(yshear, Ly);
00759
00760
00761
00762
00763
00764 joffset = (int)(deltay/pG->dx2);
00765 epso = -(fmod(deltay,pG->dx2))/pG->dx2;
00766
00767
00768
00769
00770
00771 if (pG->Nx[2] > 1 || ShBoxCoord==xy) {
00772
00773 if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
00774 for(k=ks; k<=ku; k++) {
00775 for(j=js-nghost; j<=je+nghost; j++){
00776 for(i=0; i<nghost; i++){
00777 ii = ie+1+i;
00778 GhstZns[k][i][j].U[0] = pG->U[k][j][ii].d;
00779 GhstZns[k][i][j].U[1] = pG->U[k][j][ii].M1;
00780 GhstZns[k][i][j].U[2] = pG->U[k][j][ii].M2;
00781 #ifndef FARGO
00782 GhstZns[k][i][j].U[2] -= qomL*pG->U[k][j][ii].d;
00783 #endif
00784 GhstZns[k][i][j].U[3] = pG->U[k][j][ii].M3;
00785 #ifdef ADIABATIC
00786
00787 GhstZns[k][i][j].U[4] = pG->U[k][j][ii].E + (0.5/GhstZns[k][i][j].U[0])*
00788 (SQR(GhstZns[k][i][j].U[2]) - SQR(pG->U[k][j][ii].M2));
00789 #endif
00790 #ifdef MHD
00791 GhstZns[k][i][j].U[NREMAP-4] = pG->U[k][j][ii].B1c;
00792 GhstZns[k][i][j].U[NREMAP-3] = pG->B1i[k][j][ii];
00793 GhstZns[k][i][j].U[NREMAP-2] = pG->B2i[k][j][ii];
00794 GhstZns[k][i][j].U[NREMAP-1] = pG->B3i[k][j][ii];
00795 #endif
00796 #if (NSCALARS > 0)
00797 for(n=0; n<NSCALARS; n++) GhstZns[k][i][j].s[n] = pG->U[k][j][ii].s[n];
00798 #endif
00799 }
00800 }
00801 }
00802
00803
00804
00805
00806
00807 for(k=ks; k<=ku; k++) {
00808 for(i=0; i<nghost; i++){
00809
00810 for (n=0; n<(NREMAP); n++) {
00811 for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].U[n];
00812 RemapFlux(U,epso,js,je+1,Flx);
00813 for(j=js; j<=je; j++){
00814 GhstZnsBuf[k][i][j].U[n] = GhstZns[k][i][j].U[n] - (Flx[j+1]-Flx[j]);
00815 }
00816 }
00817
00818 #if (NSCALARS > 0)
00819 for (n=0; n<(NSCALARS); n++) {
00820 for (j=js-nghost; j<=je+nghost; j++) U[j] = GhstZns[k][i][j].s[n];
00821 RemapFlux(U,epso,js,je+1,Flx);
00822 for(j=js; j<=je; j++){
00823 GhstZnsBuf[k][i][j].s[n] = GhstZns[k][i][j].s[n] - (Flx[j+1]-Flx[j]);
00824 }
00825 }
00826 #endif
00827
00828 }
00829 }
00830
00831
00832
00833
00834
00835 if (pD->NGrid[1] == 1) {
00836
00837 for(k=ks; k<=ku; k++) {
00838 for(j=js; j<=je; j++){
00839 jremap = j + joffset;
00840 if (jremap > (int)je) jremap -= pG->Nx[1];
00841
00842 for(i=0; i<nghost; i++){
00843 for (n=0; n<(NREMAP); n++) {
00844 GhstZns[k][i][j].U[n] = GhstZnsBuf[k][i][jremap].U[n];
00845 }
00846 #if (NSCALARS > 0)
00847 for (n=0; n<NSCALARS; n++) {
00848 GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00849 }
00850 #endif
00851 }
00852
00853 }
00854 }
00855
00856 #ifdef MPI_PARALLEL
00857 } else {
00858
00859
00860
00861
00862
00863
00864 get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
00865
00866
00867
00868 Ngrids = (int)(joffset/pG->Nx[1]);
00869 joverlap = joffset - Ngrids*pG->Nx[1];
00870
00871
00872
00873
00874
00875
00876
00877 if (joverlap != 0) {
00878
00879 jproc = my_jproc - (Ngrids + 1);
00880 if (jproc < 0) jproc += pD->NGrid[1];
00881 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00882
00883 jproc = my_jproc + (Ngrids + 1);
00884 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00885 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00886
00887
00888
00889
00890 cnt = nghost*joverlap*(ku-ks+1)*(NREMAP+NSCALARS);
00891
00892 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00893 shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
00894
00895 pSnd = send_buf;
00896 for (k=ks; k<=ku; k++) {
00897 for (j=js; j<=js+(joverlap-1); j++) {
00898 for(i=0; i<nghost; i++){
00899
00900 pRemap = &(GhstZnsBuf[k][i][j]);
00901
00902 for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00903 #if (NSCALARS > 0)
00904 for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00905 #endif
00906 }
00907 }
00908 }
00909 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00910 shearing_sheet_ox1_tag, pD->Comm_Domain);
00911
00912
00913
00914
00915
00916
00917
00918 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
00919
00920 pRcv = recv_buf;
00921 for (k=ks; k<=ku; k++) {
00922 for (j=je-(joverlap-1); j<=je; j++) {
00923 for(i=0; i<nghost; i++){
00924
00925 pRemap = &(GhstZns[k][i][j]);
00926
00927 for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
00928 #if (NSCALARS > 0)
00929 for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
00930 #endif
00931 }
00932 }
00933 }
00934
00935 }
00936
00937
00938
00939
00940
00941
00942 if (Ngrids == 0) {
00943
00944 for(k=ks; k<=ku; k++) {
00945 for(j=js; j<=je-joverlap; j++){
00946 jremap = j+joverlap;
00947 for(i=0; i<nghost; i++){
00948 for (n=0; n<(NREMAP); n++) {
00949 GhstZns[k][i][j].U[n] = GhstZnsBuf[k][i][jremap].U[n];
00950 }
00951 #if (NSCALARS > 0)
00952 for (n=0; n<NSCALARS; n++) {
00953 GhstZns[k][i][j].s[n] = GhstZnsBuf[k][i][jremap].s[n];
00954 }
00955 #endif
00956 }
00957 }
00958 }
00959
00960
00961
00962
00963
00964 } else {
00965
00966
00967
00968 jproc = my_jproc - Ngrids;
00969 if (jproc < 0) jproc += pD->NGrid[1];
00970 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00971
00972 jproc = my_jproc + Ngrids;
00973 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
00974 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
00975
00976 cnt = nghost*(pG->Nx[1]-joverlap)*(ku-ks+1)*(NREMAP+NSCALARS);
00977
00978 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
00979 shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
00980
00981 pSnd = send_buf;
00982 for (k=ks; k<=ku; k++) {
00983 for (j=js+joverlap; j<=je; j++) {
00984 for(i=0; i<nghost; i++){
00985
00986 pRemap = &(GhstZnsBuf[k][i][j]);
00987 for (n=0; n<NREMAP; n++) *(pSnd++) = pRemap->U[n];
00988 #if (NSCALARS > 0)
00989 for (n=0; n<NSCALARS; n++) *(pSnd++) = pRemap->s[n];
00990 #endif
00991 }
00992 }
00993 }
00994 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
00995 shearing_sheet_ox1_tag, pD->Comm_Domain);
00996
00997
00998
00999
01000 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01001
01002 pRcv = recv_buf;
01003 for (k=ks; k<=ku; k++) {
01004 for (j=js; j<=je-joverlap; j++) {
01005 for(i=0; i<nghost; i++){
01006
01007 pRemap = &(GhstZns[k][i][j]);
01008 for (n=0; n<NREMAP; n++) pRemap->U[n] = *(pRcv++);
01009 #if (NSCALARS > 0)
01010 for (n=0; n<NSCALARS; n++) pRemap->s[n] = *(pRcv++);
01011 #endif
01012 }
01013 }
01014 }
01015 }
01016
01017 #endif
01018 }
01019
01020
01021
01022
01023 for(k=ks; k<=ke; k++) {
01024 for(j=js; j<=je; j++){
01025 for(i=0; i<nghost; i++){
01026 pG->U[k][j][ie+1+i].d = GhstZns[k][i][j].U[0];
01027 pG->U[k][j][ie+1+i].M1 = GhstZns[k][i][j].U[1];
01028 pG->U[k][j][ie+1+i].M2 = GhstZns[k][i][j].U[2];
01029 pG->U[k][j][ie+1+i].M3 = GhstZns[k][i][j].U[3];
01030 #ifdef ADIABATIC
01031 pG->U[k][j][ie+1+i].E = GhstZns[k][i][j].U[4];
01032 #endif
01033 #ifdef MHD
01034 pG->U[k][j][ie+1+i].B1c = GhstZns[k][i][j].U[NREMAP-4];
01035 if(i>0) pG->B1i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-3];
01036 pG->B2i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-2];
01037 pG->B3i[k][j][ie+1+i] = GhstZns[k][i][j].U[NREMAP-1];
01038 #endif
01039 #if (NSCALARS > 0)
01040 for (n=0; n<NSCALARS; n++) {
01041 pG->U[k][j][ie+1+i].s[n] = GhstZns[k][i][j].s[n];
01042 }
01043 #endif
01044 }
01045 }
01046 }
01047
01048
01049 #ifdef MHD
01050 if (pG->Nx[2] > 1) {
01051 for(j=js; j<=je; j++){
01052 for(i=0; i<nghost; i++){
01053 pG->B3i[ke+1][j][ie+1+i] = GhstZns[ke+1][i][j].U[NREMAP-1];
01054 }
01055 }
01056 }
01057 #endif
01058
01059
01060
01061
01062
01063
01064 #ifdef MHD
01065 for (k=ks; k<=ke; k++) {
01066 for (j=js; j<=je; j++) {
01067 for(i=ie+1; i<=ie+nghost; i++){
01068 pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
01069 }
01070 }
01071 }
01072 if (pG->Nx[2] > 1) {
01073 for (k=ks; k<=ke; k++) {
01074 for (j=js; j<=je; j++) {
01075 for(i=ie+1; i<=ie+nghost; i++){
01076 pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
01077 }
01078 }
01079 }
01080 }
01081 #endif
01082
01083
01084
01085
01086
01087 if (pD->NGrid[1] == 1) {
01088
01089 for(k=ks; k<=ke; k++) {
01090 for(j=1; j<=nghost; j++){
01091 for(i=ie+1; i<=ie+nghost; i++){
01092 pG->U[k][js-j][i] = pG->U[k][je-(j-1)][i];
01093 pG->U[k][je+j][i] = pG->U[k][js+(j-1)][i];
01094 #ifdef MHD
01095 pG->B1i[k][js-j][i] = pG->B1i[k][je-(j-1)][i];
01096 pG->B2i[k][js-j][i] = pG->B2i[k][je-(j-1)][i];
01097 pG->B3i[k][js-j][i] = pG->B3i[k][je-(j-1)][i];
01098
01099 pG->B1i[k][je+j][i] = pG->B1i[k][js+(j-1)][i];
01100 pG->B2i[k][je+j][i] = pG->B2i[k][js+(j-1)][i];
01101 pG->B3i[k][je+j][i] = pG->B3i[k][js+(j-1)][i];
01102 #endif
01103 }
01104 }
01105 }
#ifdef MHD
/* as in ShearingSheet_ix1() above, the B3i values on the ke+1 face only exist
 * (and only need the periodic copy) when the Grid is 3D */
if (pG->Nx[2] > 1) {
for (j=1; j<=nghost; j++) {
for (i=ie+1; i<=ie+nghost; i++) {
pG->B3i[ke+1][js-j][i] = pG->B3i[ke+1][je-(j-1)][i];
pG->B3i[ke+1][je+j][i] = pG->B3i[ke+1][js+(j-1)][i];
}
}
}
#endif
01114
01115 #ifdef MPI_PARALLEL
01116 } else {
01117
01118
01119
01120
01121
01122
01123
01124 cnt = nghost*nghost*(ku-ks+1)*NVAR_SHARE;
01125 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01126 shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
01127
01128 pSnd = send_buf;
01129 for (k=ks; k<=ku; k++){
01130 for (j=je-nghost+1; j<=je; j++){
01131 for (i=ie+1; i<=ie+nghost; i++){
01132
01133 pCons = &(pG->U[k][j][i]);
01134
01135 *(pSnd++) = pCons->d;
01136 *(pSnd++) = pCons->M1;
01137 *(pSnd++) = pCons->M2;
01138 *(pSnd++) = pCons->M3;
01139 #ifndef BAROTROPIC
01140 *(pSnd++) = pCons->E;
01141 #endif
01142 #ifdef MHD
01143 *(pSnd++) = pCons->B1c;
01144 *(pSnd++) = pCons->B2c;
01145 *(pSnd++) = pCons->B3c;
01146 *(pSnd++) = pG->B1i[k][j][i];
01147 *(pSnd++) = pG->B2i[k][j][i];
01148 *(pSnd++) = pG->B3i[k][j][i];
01149 #endif
01150 #if (NSCALARS > 0)
01151 for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
01152 #endif
01153 }
01154 }
01155 }
01156
01157
01158 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01159 shearing_sheet_ox1_tag, pD->Comm_Domain);
01160
01161
01162 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01163
01164 pRcv = recv_buf;
01165 for (k=ks; k<=ku; k++){
01166 for (j=js-nghost; j<=js-1; j++){
01167 for (i=ie+1; i<=ie+nghost; i++){
01168
01169 pCons = &(pG->U[k][j][i]);
01170
01171 pCons->d = *(pRcv++);
01172 pCons->M1 = *(pRcv++);
01173 pCons->M2 = *(pRcv++);
01174 pCons->M3 = *(pRcv++);
01175 #ifndef BAROTROPIC
01176 pCons->E = *(pRcv++);
01177 #endif
01178 #ifdef MHD
01179 pCons->B1c = *(pRcv++);
01180 pCons->B2c = *(pRcv++);
01181 pCons->B3c = *(pRcv++);
01182 pG->B1i[k][j][i] = *(pRcv++);
01183 pG->B2i[k][j][i] = *(pRcv++);
01184 pG->B3i[k][j][i] = *(pRcv++);
01185 #endif
01186 #if (NSCALARS > 0)
01187 for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
01188 #endif
01189 }
01190 }
01191 }
01192
01193
01194 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01195 shearing_sheet_ox1_tag, pD->Comm_Domain, &rq);
01196
01197 pSnd = send_buf;
01198 for (k=ks; k<=ku; k++){
01199 for (j=js; j<=js+nghost-1; j++){
01200 for (i=ie+1; i<=ie+nghost; i++){
01201
01202 pCons = &(pG->U[k][j][i]);
01203
01204 *(pSnd++) = pCons->d;
01205 *(pSnd++) = pCons->M1;
01206 *(pSnd++) = pCons->M2;
01207 *(pSnd++) = pCons->M3;
01208 #ifndef BAROTROPIC
01209 *(pSnd++) = pCons->E;
01210 #endif
01211 #ifdef MHD
01212 *(pSnd++) = pCons->B1c;
01213 *(pSnd++) = pCons->B2c;
01214 *(pSnd++) = pCons->B3c;
01215 *(pSnd++) = pG->B1i[k][j][i];
01216 *(pSnd++) = pG->B2i[k][j][i];
01217 *(pSnd++) = pG->B3i[k][j][i];
01218 #endif
01219 #if (NSCALARS > 0)
01220 for (n=0; n<NSCALARS; n++) *(pSnd++) = pCons->s[n];
01221 #endif
01222 }
01223 }
01224 }
01225
01226
01227 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01228 shearing_sheet_ox1_tag, pD->Comm_Domain);
01229
01230
01231 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01232
01233 pRcv = recv_buf;
01234 for (k=ks; k<=ku; k++){
01235 for (j=je+1; j<=je+nghost; j++){
01236 for (i=ie+1; i<=ie+nghost; i++){
01237
01238 pCons = &(pG->U[k][j][i]);
01239
01240 pCons->d = *(pRcv++);
01241 pCons->M1 = *(pRcv++);
01242 pCons->M2 = *(pRcv++);
01243 pCons->M3 = *(pRcv++);
01244 #ifndef BAROTROPIC
01245 pCons->E = *(pRcv++);
01246 #endif
01247 #ifdef MHD
01248 pCons->B1c = *(pRcv++);
01249 pCons->B2c = *(pRcv++);
01250 pCons->B3c = *(pRcv++);
01251 pG->B1i[k][j][i] = *(pRcv++);
01252 pG->B2i[k][j][i] = *(pRcv++);
01253 pG->B3i[k][j][i] = *(pRcv++);
01254 #endif
01255 #if (NSCALARS > 0)
01256 for (n=0; n<NSCALARS; n++) pCons->s[n] = *(pRcv++);
01257 #endif
01258 }
01259 }
01260 }
01261 #endif
01262
01263 }
01264
01265
01266
01267
01268 #ifdef MHD
01269 for (k=ks; k<=ke; k++) {
01270 for (i=ie+1; i<=ie+nghost; i++){
01271 pG->U[k][je ][i].B2c = 0.5*(pG->B2i[k][je+1][i]+pG->B2i[k][je][i]);
01272 pG->U[k][js-1][i].B2c = 0.5*(pG->B2i[k][js-1][i]+pG->B2i[k][js][i]);
01273 }
01274 }
01275 #endif
01276
01277 }
01278
01279
01280
01281
01282
01283
01284 #ifndef FARGO
01285 if (pG->Nx[2] == 1 && ShBoxCoord==xz) {
01286
01287 for (j=js-nghost; j<=je+nghost; j++) {
01288 for (i=1; i<=nghost; i++) {
01289 #ifdef ADIABATIC
01290
01291 pG->U[ks][j][ie+i].E += (0.5/pG->U[ks][j][ie+i].d)*
01292 (SQR((pG->U[ks][j][ie+i].M3 - qomL*pG->U[ks][j][ie+i].d))
01293 - SQR(pG->U[ks][j][ie+i].M3));
01294 #endif
01295 pG->U[ks][j][ie+i].M3 -= qomL*pG->U[ks][j][ie+i].d;
01296 }
01297 }
01298
01299 }
01300 #endif
01301
01302 return;
01303 }
01304
01305
01306
01307
01308
01309
01310
01311
01312
01313
01314
01315
01316
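/*----------------------------------------------------------------------------*/
/* RemapEy_ix1: computes the remapped y-EMF (Ey) to be applied along the inner
 * x1 boundary.  The Ey values on the outer x1 face (at ie+1, taken directly
 * from emfy for a single Grid in x1, or received via MPI from the Grid on the
 * other side of the shearing interface) are shifted in y by deltay and
 * returned in tEy, so that consistent edge-centered EMFs are used on the two
 * radial faces of the shearing-periodic boundary. */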
01317 #ifdef MHD
01318 void RemapEy_ix1(DomainS *pD, Real ***emfy, Real **tEy)
01319 {
01320 GridS *pG = pD->Grid;
01321 int ie = pG->ie;
01322 int js = pG->js, je = pG->je;
01323 int ks = pG->ks, ke = pG->ke;
01324 int j,k,joffset,jremap;
01325 Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epsi;
01326 #ifdef MPI_PARALLEL
01327 int is = pG->is;
01328 int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
01329 int ierr,sendto_id,getfrom_id;
01330 double *pSnd,*pRcv;
01331 Real *pEy;
01332 MPI_Request rq;
01333 #endif
01334
01335
01336
01337
01338 xmin = pD->RootMinX[0];
01339 xmax = pD->RootMaxX[0];
01340 Lx = xmax - xmin;
01341
01342 xmin = pD->RootMinX[1];
01343 xmax = pD->RootMaxX[1];
01344 Ly = xmax - xmin;
01345
01346 qomL = qshear*Omega_0*Lx;
01347 yshear = qomL*pG->time;
01348 deltay = fmod(yshear, Ly);
01349 joffset = (int)(deltay/pG->dx2);
01350 epsi = (fmod(deltay,pG->dx2))/pG->dx2;
01351
01352
01353
01354
01355
01356 if (pD->NGrid[0] == 1) {
01357 for(k=ks; k<=ke+1; k++) {
01358 for(j=js; j<=je; j++){
01359 tEy[k][j] = emfy[k][j][ie+1];
01360 }
01361 }
01362
01363 #ifdef MPI_PARALLEL
01364 } else {
01365
01366
01367
01368 cnt = pG->Nx[1]*(pG->Nx[2]+1);
01369 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx1_id,
01370 remapEy_tag, pD->Comm_Domain, &rq);
01371
01372
01373
01374 pSnd = send_buf;
01375 for (k=ks; k<=ke+1; k++) {
01376 for (j=js; j<=je; j++) {
01377 pEy = &(emfy[k][j][is]);
01378 *(pSnd++) = *pEy;
01379 }
01380 }
01381 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx1_id,
01382 remapEy_tag, pD->Comm_Domain);
01383
01384
01385
01386 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01387
01388 pRcv = recv_buf;
01389 for (k=ks; k<=ke+1; k++) {
01390 for (j=js; j<=je; j++) {
01391 pEy = &(tEy[k][j]);
01392 *pEy = *(pRcv++);
01393 }
01394 }
01395 #endif
01396 }
01397
01398
01399
01400
01401
01402 if (pD->NGrid[1] == 1) {
01403
01404 for(k=ks; k<=ke+1; k++) {
01405 for(j=1; j<=nghost; j++){
01406 tEy[k][js-j] = tEy[k][je-(j-1)];
01407 tEy[k][je+j] = tEy[k][js+(j-1)];
01408 }
01409 }
01410
01411 #ifdef MPI_PARALLEL
01412 } else {
01413
01414
01415
01416 cnt = nghost*(pG->Nx[2] + 1);
01417
01418 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01419 remapEy_tag, pD->Comm_Domain, &rq);
01420
01421 pSnd = send_buf;
01422 for (k=ks; k<=ke+1; k++){
01423 for (j=je-nghost+1; j<=je; j++){
01424 pEy = &(tEy[k][j]);
01425 *(pSnd++) = *pEy;
01426 }
01427 }
01428
01429
01430 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01431 remapEy_tag, pD->Comm_Domain);
01432
01433
01434 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01435
01436 pRcv = recv_buf;
01437 for (k=ks; k<=ke+1; k++){
01438 for (j=js-nghost; j<=js-1; j++){
01439 pEy = &(tEy[k][j]);
01440 *pEy = *(pRcv++);
01441 }
01442 }
01443
01444
01445 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01446 remapEy_tag, pD->Comm_Domain, &rq);
01447
01448 pSnd = send_buf;
01449 for (k=ks; k<=ke+1; k++){
01450 for (j=js; j<=js+nghost-1; j++){
01451 pEy = &(tEy[k][j]);
01452 *(pSnd++) = *pEy;
01453 }
01454 }
01455
01456
01457 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01458 remapEy_tag, pD->Comm_Domain);
01459
01460
01461 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01462
01463 pRcv = recv_buf;
01464 for (k=ks; k<=ke+1; k++){
01465 for (j=je+1; j<=je+nghost; j++){
01466 pEy = &(tEy[k][j]);
01467 *pEy = *(pRcv++);
01468 }
01469 }
01470 #endif
01471 }
01472
01473
01474
01475
01476
01477 for(k=ks; k<=ke+1; k++) {
01478 RemapFlux(tEy[k],epsi,js,je+1,Flx);
01479 for(j=js; j<=je; j++){
01480 tEyBuf[k][j] = tEy[k][j] - (Flx[j+1] - Flx[j]);
01481 }
01482 }
01483
01484
01485
01486
01487
01488 if (pD->NGrid[1] == 1) {
01489
01490 for(k=ks; k<=ke+1; k++) {
01491 for(j=js; j<=je; j++){
01492 jremap = j - joffset;
01493 if (jremap < (int)js) jremap += pG->Nx[1];
01494 tEy[k][j] = tEyBuf[k][jremap];
01495 }
01496 }
01497
01498 #ifdef MPI_PARALLEL
01499 } else {
01500
01501
01502
01503
01504
01505
01506 get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
01507
01508
01509
01510 Ngrids = (int)(joffset/pG->Nx[1]);
01511 joverlap = joffset - Ngrids*pG->Nx[1];
01512
01513
01514
01515
01516
01517
01518
01519 if (joverlap != 0) {
01520
01521 jproc = my_jproc + (Ngrids + 1);
01522 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01523 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01524
01525 jproc = my_jproc - (Ngrids + 1);
01526 if (jproc < 0) jproc += pD->NGrid[1];
01527 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01528
01529
01530
01531
01532 cnt = joverlap*(pG->Nx[2]+1);
01533
01534 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01535 remapEy_tag, pD->Comm_Domain, &rq);
01536
01537 pSnd = send_buf;
01538 for (k=ks; k<=ke+1; k++) {
01539 for (j=je-(joverlap-1); j<=je; j++) {
01540 pEy = &(tEyBuf[k][j]);
01541 *(pSnd++) = *pEy;
01542 }
01543 }
01544 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01545 remapEy_tag, pD->Comm_Domain);
01546
01547
01548
01549
01550
01551
01552
01553 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01554
01555 pRcv = recv_buf;
01556 for (k=ks; k<=ke+1; k++) {
01557 for (j=js; j<=js+(joverlap-1); j++) {
01558 pEy = &(tEy[k][j]);
01559 *pEy = *(pRcv++);
01560 }
01561 }
01562
01563 }
01564
01565
01566
01567
01568
01569
01570 if (Ngrids == 0) {
01571
01572 for(k=ks; k<=ke+1; k++) {
01573 for(j=js+joverlap; j<=je; j++){
01574 jremap = j-joverlap;
01575 tEy[k][j] = tEyBuf[k][jremap];
01576 }
01577 }
01578
01579
01580
01581
01582
01583 } else {
01584
01585
01586
01587 jproc = my_jproc + Ngrids;
01588 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01589 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01590
01591 jproc = my_jproc - Ngrids;
01592 if (jproc < 0) jproc += pD->NGrid[1];
01593 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01594
01595 cnt = (pG->Nx[1]-joverlap)*(pG->Nx[2]+1);
01596
01597 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01598 remapEy_tag, pD->Comm_Domain, &rq);
01599
01600 pSnd = send_buf;
01601 for (k=ks; k<=ke+1; k++) {
01602 for (j=js; j<=je-joverlap; j++) {
01603 pEy = &(tEyBuf[k][j]);
01604 *(pSnd++) = *pEy;
01605 }
01606 }
01607 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01608 remapEy_tag, pD->Comm_Domain);
01609
01610
01611
01612
01613 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01614
01615 pRcv = recv_buf;
01616 for (k=ks; k<=ke+1; k++) {
01617 for (j=js+joverlap; j<=je; j++) {
01618 pEy = &(tEy[k][j]);
01619 *pEy = *(pRcv++);
01620 }
01621 }
01622 }
01623
01624 #endif
01625 }
01626
01627
01628
01629
01630 return;
01631 }
01632 #endif
01633
01634
01635
01636
01637
01638
01639
01640
01641
01642
01643
01644
01645
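/*----------------------------------------------------------------------------*/
/* RemapEy_ox1: as RemapEy_ix1(), but for the outer x1 boundary.  The Ey values
 * on the inner x1 face (at is, obtained directly or via MPI) are shifted in y
 * by -deltay (epso) and returned in tEy. */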
01646 #ifdef MHD
01647 void RemapEy_ox1(DomainS *pD, Real ***emfy, Real **tEy)
01648 {
01649 GridS *pG = pD->Grid;
01650 int is = pG->is;
01651 int js = pG->js, je = pG->je;
01652 int ks = pG->ks, ke = pG->ke;
01653 int j,k,joffset,jremap;
01654 Real xmin,xmax,Lx,Ly,qomL,yshear,deltay,epso;
01655 #ifdef MPI_PARALLEL
01656 int ie = pG->ie;
01657 int my_iproc,my_jproc,my_kproc,cnt,jproc,joverlap,Ngrids;
01658 int ierr,sendto_id,getfrom_id;
01659 double *pSnd,*pRcv;
01660 Real *pEy;
01661 MPI_Request rq;
01662 #endif
01663
01664
01665
01666
01667 xmin = pD->RootMinX[0];
01668 xmax = pD->RootMaxX[0];
01669 Lx = xmax - xmin;
01670
01671 xmin = pD->RootMinX[1];
01672 xmax = pD->RootMaxX[1];
01673 Ly = xmax - xmin;
01674
01675 qomL = qshear*Omega_0*Lx;
01676 yshear = qomL*pG->time;
01677 deltay = fmod(yshear, Ly);
01678 joffset = (int)(deltay/pG->dx2);
01679 epso = -(fmod(deltay,pG->dx2))/pG->dx2;
01680
01681
01682
01683
01684
01685 if (pD->NGrid[0] == 1) {
01686 for(k=ks; k<=ke+1; k++) {
01687 for(j=js; j<=je; j++){
01688 tEy[k][j] = emfy[k][j][is];
01689 }
01690 }
01691
01692 #ifdef MPI_PARALLEL
01693 } else {
01694
01695
01696
01697 cnt = pG->Nx[1]*(pG->Nx[2]+1);
01698
01699 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx1_id,
01700 remapEy_tag, pD->Comm_Domain, &rq);
01701
01702
01703
01704 pSnd = send_buf;
01705 for (k=ks; k<=ke+1; k++) {
01706 for (j=js; j<=je; j++) {
01707 pEy = &(emfy[k][j][ie+1]);
01708 *(pSnd++) = *pEy;
01709 }
01710 }
01711 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx1_id,
01712 remapEy_tag, pD->Comm_Domain);
01713
01714
01715
01716 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01717
01718 pRcv = recv_buf;
01719 for (k=ks; k<=ke+1; k++) {
01720 for (j=js; j<=je; j++) {
01721 pEy = &(tEy[k][j]);
01722 *pEy = *(pRcv++);
01723 }
01724 }
01725 #endif
01726 }
01727
01728
01729
01730
01731
01732 if (pD->NGrid[1] == 1) {
01733
01734 for(k=ks; k<=ke+1; k++) {
01735 for(j=1; j<=nghost; j++){
01736 tEy[k][js-j] = tEy[k][je-(j-1)];
01737 tEy[k][je+j] = tEy[k][js+(j-1)];
01738 }
01739 }
01740
01741 #ifdef MPI_PARALLEL
01742 } else {
01743
01744
01745
01746 cnt = nghost*(pG->Nx[2] + 1);
01747
01748 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01749 remapEy_tag, pD->Comm_Domain, &rq);
01750
01751 pSnd = send_buf;
01752 for (k=ks; k<=ke+1; k++){
01753 for (j=je-nghost+1; j<=je; j++){
01754 pEy = &(tEy[k][j]);
01755 *(pSnd++) = *pEy;
01756 }
01757 }
01758
01759
01760 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01761 remapEy_tag, pD->Comm_Domain);
01762
01763
01764 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01765
01766 pRcv = recv_buf;
01767 for (k=ks; k<=ke+1; k++){
01768 for (j=js-nghost; j<=js-1; j++){
01769 pEy = &(tEy[k][j]);
01770 *pEy = *(pRcv++);
01771 }
01772 }
01773
01774
01775 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
01776 remapEy_tag, pD->Comm_Domain, &rq);
01777
01778 pSnd = send_buf;
01779 for (k=ks; k<=ke+1; k++){
01780 for (j=js; j<=js+nghost-1; j++){
01781 pEy = &(tEy[k][j]);
01782 *(pSnd++) = *pEy;
01783 }
01784 }
01785
01786
01787 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
01788 remapEy_tag, pD->Comm_Domain);
01789
01790
01791 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01792
01793 pRcv = recv_buf;
01794 for (k=ks; k<=ke+1; k++){
01795 for (j=je+1; j<=je+nghost; j++){
01796 pEy = &(tEy[k][j]);
01797 *pEy = *(pRcv++);
01798 }
01799 }
01800 #endif
01801 }
01802
01803
01804
01805
01806
01807 for(k=ks; k<=ke+1; k++) {
01808 RemapFlux(tEy[k],epso,js,je+1,Flx);
01809 for(j=js; j<=je; j++){
01810 tEyBuf[k][j] = tEy[k][j] - (Flx[j+1] - Flx[j]);
01811 }
01812 }
01813
01814
01815
01816
01817
01818 if (pD->NGrid[1] == 1) {
01819
01820 for(k=ks; k<=ke+1; k++) {
01821 for(j=js; j<=je; j++){
01822 jremap = j + joffset;
01823 if (jremap > (int)je) jremap -= pG->Nx[1];
01824 tEy[k][j] = tEyBuf[k][jremap];
01825 }
01826 }
01827
01828 #ifdef MPI_PARALLEL
01829 } else {
01830
01831
01832
01833
01834
01835
01836 get_myGridIndex(pD, myID_Comm_world, &my_iproc, &my_jproc, &my_kproc);
01837
01838
01839
01840 Ngrids = (int)(joffset/pG->Nx[1]);
01841 joverlap = joffset - Ngrids*pG->Nx[1];
01842
01843
01844
01845
01846
01847
01848
01849 if (joverlap != 0) {
01850
01851 jproc = my_jproc - (Ngrids + 1);
01852 if (jproc < 0) jproc += pD->NGrid[1];
01853 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01854
01855 jproc = my_jproc + (Ngrids + 1);
01856 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01857 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01858
01859
01860
01861
01862 cnt = joverlap*(pG->Nx[2]+1);
01863
01864 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01865 remapEy_tag, pD->Comm_Domain, &rq);
01866
01867 pSnd = send_buf;
01868 for (k=ks; k<=ke+1; k++) {
01869 for (j=js; j<=js+(joverlap-1); j++) {
01870 pEy = &(tEyBuf[k][j]);
01871 *(pSnd++) = *pEy;
01872 }
01873 }
01874 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01875 remapEy_tag, pD->Comm_Domain);
01876
01877
01878
01879
01880
01881
01882 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01883
01884 pRcv = recv_buf;
01885 for (k=ks; k<=ke+1; k++) {
01886 for (j=je-(joverlap-1); j<=je; j++) {
01887 pEy = &(tEy[k][j]);
01888 *pEy = *(pRcv++);
01889 }
01890 }
01891
01892 }
01893
01894
01895
01896
01897
01898
01899 if (Ngrids == 0) {
01900
01901 for(k=ks; k<=ke+1; k++) {
01902 for(j=js; j<=je-joverlap; j++){
01903 jremap = j+joverlap;
01904 tEy[k][j] = tEyBuf[k][jremap];
01905 }
01906 }
01907
01908
01909
01910
01911
01912 } else {
01913
01914
01915
01916 jproc = my_jproc - Ngrids;
01917 if (jproc < 0) jproc += pD->NGrid[1];
01918 sendto_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01919
01920 jproc = my_jproc + Ngrids;
01921 if (jproc > (pD->NGrid[1]-1)) jproc -= pD->NGrid[1];
01922 getfrom_id = pD->GData[my_kproc][jproc][my_iproc].ID_Comm_Domain;
01923
01924 cnt = (pG->Nx[1]-joverlap)*(pG->Nx[2]+1);
01925
01926 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, getfrom_id,
01927 remapEy_tag, pD->Comm_Domain, &rq);
01928
01929 pSnd = send_buf;
01930 for (k=ks; k<=ke+1; k++) {
01931 for (j=js+joverlap; j<=je; j++) {
01932 pEy = &(tEyBuf[k][j]);
01933 *(pSnd++) = *pEy;
01934 }
01935 }
01936 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, sendto_id,
01937 remapEy_tag, pD->Comm_Domain);
01938
01939
01940
01941
01942 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
01943
01944 pRcv = recv_buf;
01945 for (k=ks; k<=ke+1; k++) {
01946 for (j=js; j<=je-joverlap; j++) {
01947 pEy = &(tEy[k][j]);
01948 *pEy = *(pRcv++);
01949 }
01950 }
01951 }
01952
01953 #endif
01954 }
01955
01956
01957
01958
01959 return;
01960 }
01961 #endif
01962
01963 #endif
01964
01965
01966
01967
01968
01969
01970
01971
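/*----------------------------------------------------------------------------*/
/* Fargo: orbital advection of the conserved variables.  Each variable is
 * shifted in y by the background orbital displacement over one time step:
 * RemapFlux() supplies the flux for the fractional part of the shift and a
 * running sum over joffset cells supplies the integer part.  With MHD, the
 * interface fields B1i and B3i are advected as well and the resulting fluxes
 * are used in a constrained-transport-style update of the field.  Works for
 * the 3D shearing box and for cylindrical coordinates with an enrolled
 * OrbitalProfile(). */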
01972 #ifdef FARGO
01973 void Fargo(DomainS *pD)
01974 {
01975 GridS *pG = pD->Grid;
01976 int is = pG->is, ie = pG->ie;
01977 int js = pG->js, je = pG->je;
01978 int ks = pG->ks, ke = pG->ke;
01979 int jfs = nfghost, jfe = pG->Nx[1] + nfghost - 1;
01980 int i,j,k,ku,jj,n,joffset;
01981 Real x1,x2,x3,yshear,eps;
01982 #if defined(ADIABATIC) && defined(SHEARING_BOX)
01983 Real qom_dt = qshear*Omega_0*pG->dt;
01984 #endif
01985 #ifdef CYLINDRICAL
01986 const Real *r=pG->r, *ri=pG->ri;
01987 #endif
01988 #ifdef MPI_PARALLEL
01989 int ierr,cnt;
01990 double *pSnd,*pRcv;
01991 FConsS *pFCons;
01992 MPI_Request rq;
01993 #endif
01994
01995
01996
01997
01998
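/* Copy the conserved variables (and B3i/B1i with MHD) into FargoVars, indexed
 * with nfghost ghost zones in y.  With adiabatic MHD the energy is first
 * corrected for the azimuthal field generated by the background shear. */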
01999 if (pG->Nx[2] > 1) ku=ke+1; else ku=ke;
02000 for(k=ks; k<=ku; k++) {
02001 for(j=jfs; j<=jfe+1; j++){
02002 for(i=is; i<=ie+1; i++){
02003 jj = j-(jfs-js);
02004 FargoVars[k][i][j].U[0] = pG->U[k][jj][i].d;
02005 FargoVars[k][i][j].U[1] = pG->U[k][jj][i].M1;
02006 FargoVars[k][i][j].U[2] = pG->U[k][jj][i].M2;
02007 FargoVars[k][i][j].U[3] = pG->U[k][jj][i].M3;
02008 #if defined(ADIABATIC) && defined(SHEARING_BOX)
02009 #ifdef MHD
02010
02011 pG->U[k][jj][i].E -= qom_dt*pG->U[k][jj][i].B1c*
02012 (pG->U[k][jj][i].B2c - (qom_dt/2.)*pG->U[k][jj][i].B1c);
02013 #endif
02014 FargoVars[k][i][j].U[4] = pG->U[k][jj][i].E;
02015 #endif
02016
02017
02018
02019 #ifdef MHD
02020 FargoVars[k][i][j].U[NFARGO-2] = pG->B3i[k][jj][i];
02021 FargoVars[k][i][j].U[NFARGO-1] = pG->B1i[k][jj][i];
02022 #endif
02023 #if (NSCALARS > 0)
02024 for(n=0;n<NSCALARS;n++) FargoVars[k][i][j].s[n] = pG->U[k][jj][i].s[n];
02025 #endif
02026 }
02027 }
02028 }
02029
02030
02031
02032
02033
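/* Fill the y (x2) ghost zones of FargoVars, either periodically (single Grid
 * in y) or by MPI exchange with the neighbouring Grids. */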
02034 if (pD->NGrid[1] == 1) {
02035
02036 for(k=ks; k<=ku; k++) {
02037 for(i=is; i<=ie+1; i++){
02038 for(j=1; j<=nfghost; j++){
02039 FargoVars[k][i][jfs-j] = FargoVars[k][i][jfe-(j-1)];
02040 FargoVars[k][i][jfe+j] = FargoVars[k][i][jfs+(j-1)];
02041 }
02042 }
02043 }
02044
02045 #ifdef MPI_PARALLEL
02046 } else {
02047
02048
02049
02050
02051
02052
02053 cnt = (pG->Nx[0]+1)*nfghost*(ku-ks+1)*(NFARGO + NSCALARS);
02054 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->lx2_id,
02055 fargo_tag, pD->Comm_Domain, &rq);
02056
02057 pSnd = send_buf;
02058 for (k=ks; k<=ku; k++){
02059 for (i=is; i<=ie+1; i++){
02060 for (j=jfe-nfghost+1; j<=jfe; j++){
02061
02062 pFCons = &(FargoVars[k][i][j]);
02063 for (n=0; n<NFARGO; n++) *(pSnd++) = pFCons->U[n];
02064 #if (NSCALARS > 0)
02065 for (n=0; n<NSCALARS; n++) *(pSnd++) = pFCons->s[n];
02066 #endif
02067 }
02068 }
02069 }
02070
02071
02072 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->rx2_id,
02073 fargo_tag, pD->Comm_Domain);
02074
02075
02076 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
02077
02078 pRcv = recv_buf;
02079 for (k=ks; k<=ku; k++){
02080 for (i=is; i<=ie+1; i++){
02081 for (j=jfs-nfghost; j<=jfs-1; j++){
02082
02083 pFCons = &(FargoVars[k][i][j]);
02084 for (n=0; n<NFARGO; n++) pFCons->U[n] = *(pRcv++);
02085 #if (NSCALARS > 0)
02086 for (n=0; n<NSCALARS; n++) pFCons->s[n] = *(pRcv++);
02087 #endif
02088 }
02089 }
02090 }
02091
02092
02093 ierr = MPI_Irecv(recv_buf, cnt, MPI_DOUBLE, pG->rx2_id,
02094 fargo_tag, pD->Comm_Domain, &rq);
02095
02096 pSnd = send_buf;
02097 for (k=ks; k<=ku; k++){
02098 for (i=is; i<=ie+1; i++){
02099 for (j=jfs; j<=jfs+nfghost-1; j++){
02100
02101 pFCons = &(FargoVars[k][i][j]);
02102 for (n=0; n<NFARGO; n++) *(pSnd++) = pFCons->U[n];
02103 #if (NSCALARS > 0)
02104 for (n=0; n<NSCALARS; n++) *(pSnd++) = pFCons->s[n];
02105 #endif
02106 }
02107 }
02108 }
02109
02110
02111 ierr = MPI_Send(send_buf, cnt, MPI_DOUBLE, pG->lx2_id,
02112 fargo_tag, pD->Comm_Domain);
02113
02114
02115 ierr = MPI_Wait(&rq, MPI_STATUS_IGNORE);
02116
02117 pRcv = recv_buf;
02118 for (k=ks; k<=ku; k++){
02119 for (i=is; i<=ie+1; i++){
02120 for (j=jfe+1; j<=jfe+nfghost; j++){
02121
02122 pFCons = &(FargoVars[k][i][j]);
02123 for (n=0; n<NFARGO; n++) pFCons->U[n] = *(pRcv++);
02124 #if (NSCALARS > 0)
02125 for (n=0; n<NSCALARS; n++) pFCons->s[n] = *(pRcv++);
02126 #endif
02127 }
02128 }
02129 }
02130 #endif
02131 }
02132
02133
02134
02135
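/* For each x1 position, compute the orbital shift over this time step, split
 * it into an integer cell offset (joffset) and a fractional part (eps), and
 * build the total FARGO flux for every variable: RemapFlux() provides the
 * fractional contribution and the sum over the joffset cells swept past each
 * interface provides the integer contribution.  With MHD, the fluxes for B3i
 * and B1i are computed separately, the latter with the shift evaluated at the
 * cell face in x1. */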
02136 for(k=ks; k<=ku; k++) {
02137 for(i=is; i<=ie+1; i++){
02138
02139
02140 cc_pos(pG, i, js, ks, &x1,&x2,&x3);
02141 #ifdef SHEARING_BOX
02142 yshear = -qshear*Omega_0*x1*pG->dt;
02143 #endif
02144 #ifdef CYLINDRICAL
02145 yshear = ((*OrbitalProfile)(x1))*pG->dt;
02146 #endif
02147 joffset = (int)(yshear/pG->dx2);
02148 if (abs(joffset) > (jfs-js))
ath_error("[bvals_shear]: FARGO offset exceeded the number of FARGO ghost zones\n");
02150 eps = (fmod(yshear,pG->dx2))/pG->dx2;
02151
02152
02153 #ifdef MHD
02154 for (n=0; n<(NFARGO-2); n++) {
02155 #else
02156 for (n=0; n<(NFARGO); n++) {
02157 #endif
02158 for (j=jfs-nfghost; j<=jfe+nfghost; j++) U[j] = FargoVars[k][i][j].U[n];
02159 RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02160
02161 for(j=jfs; j<=jfe+1; j++){
02162 FargoFlx[k][i][j].U[n] = Flx[j-joffset];
02163
02164
02165 for (jj=1; jj<=joffset; jj++) {
02166 FargoFlx[k][i][j].U[n] += FargoVars[k][i][j-jj].U[n];
02167 }
02168 for (jj=(joffset+1); jj<=0; jj++) {
02169 FargoFlx[k][i][j].U[n] -= FargoVars[k][i][j-jj].U[n];
02170 }
02171 }
02172 }
02173
02174
02175 #if (NSCALARS > 0)
02176 for (n=0; n<(NSCALARS); n++) {
02177
02178 for (j=jfs-nfghost; j<=jfe+nfghost; j++) U[j] = FargoVars[k][i][j].s[n];
02179 RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02180
02181 for(j=jfs; j<=jfe+1; j++){
02182 FargoFlx[k][i][j].s[n] = Flx[j-joffset];
02183 for (jj=1; jj<=joffset; jj++) {
02184 FargoFlx[k][i][j].s[n] += FargoVars[k][i][j-jj].s[n];
02185 }
02186 for (jj=(joffset+1); jj<=0; jj++) {
02187 FargoFlx[k][i][j].s[n] -= FargoVars[k][i][j-jj].s[n];
02188 }
02189 }
02190
02191 }
02192 #endif
02193
02194 #ifdef MHD
02195
02196
02197 for (j=jfs-nfghost; j<=jfe+nfghost; j++) {
02198 U[j] = FargoVars[k][i][j].U[NFARGO-2];
02199 }
02200 RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02201
02202 for(j=jfs; j<=jfe+1; j++){
02203 FargoFlx[k][i][j].U[NFARGO-2] = -Flx[j-joffset];
02204 for (jj=1; jj<=joffset; jj++) {
02205 FargoFlx[k][i][j].U[NFARGO-2] -= FargoVars[k][i][j-jj].U[NFARGO-2];
02206 }
02207 for (jj=(joffset+1); jj<=0; jj++) {
02208 FargoFlx[k][i][j].U[NFARGO-2] += FargoVars[k][i][j-jj].U[NFARGO-2];
02209 }
02210 }
02211
02212
02213 #ifdef SHEARING_BOX
02214 yshear = -qshear*Omega_0*(x1 - 0.5*pG->dx1)*pG->dt;
02215 #endif
02216 #ifdef CYLINDRICAL
02217 yshear = ((*OrbitalProfile)(ri[i]))*pG->dt;
02218 #endif
02219 joffset = (int)(yshear/pG->dx2);
02220 if (abs(joffset) > (jfs-js))
ath_error("[bvals_shear]: FARGO offset exceeded the number of FARGO ghost zones\n");
02222 eps = (fmod(yshear,pG->dx2))/pG->dx2;
02223
02224 for (j=jfs-nfghost; j<=jfe+nfghost; j++) {
02225 U[j] = FargoVars[k][i][j].U[NFARGO-1];
02226 }
02227 RemapFlux(U,eps,(jfs-joffset),(jfe+1-joffset),Flx);
02228
02229 for(j=jfs; j<=jfe+1; j++){
02230 FargoFlx[k][i][j].U[NFARGO-1] = Flx[j-joffset];
02231 for (jj=1; jj<=joffset; jj++) {
02232 FargoFlx[k][i][j].U[NFARGO-1] += FargoVars[k][i][j-jj].U[NFARGO-1];
02233 }
02234 for (jj=(joffset+1); jj<=0; jj++) {
02235 FargoFlx[k][i][j].U[NFARGO-1] -= FargoVars[k][i][j-jj].U[NFARGO-1];
02236 }
02237 }
02238 #endif
02239
02240 }
02241 }
02242
02243
02244
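/* Update the conserved variables with the difference of the FARGO fluxes in y. */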
02245 for(k=ks; k<=ke; k++) {
02246 for(j=js; j<=je; j++){
02247 jj = j-js+jfs;
02248 for(i=is; i<=ie; i++){
02249 pG->U[k][j][i].d -=(FargoFlx[k][i][jj+1].U[0]-FargoFlx[k][i][jj].U[0]);
02250 pG->U[k][j][i].M1 -=(FargoFlx[k][i][jj+1].U[1]-FargoFlx[k][i][jj].U[1]);
02251 pG->U[k][j][i].M2 -=(FargoFlx[k][i][jj+1].U[2]-FargoFlx[k][i][jj].U[2]);
02252 pG->U[k][j][i].M3 -=(FargoFlx[k][i][jj+1].U[3]-FargoFlx[k][i][jj].U[3]);
02253 #ifdef ADIABATIC
02254 pG->U[k][j][i].E -=(FargoFlx[k][i][jj+1].U[4]-FargoFlx[k][i][jj].U[4]);
02255 #endif
02256 #if (NSCALARS > 0)
02257 for (n=0; n<NSCALARS; n++) {
02258 pG->U[k][j][i].s[n]-=FargoFlx[k][i][jj+1].s[n]-FargoFlx[k][i][jj].s[n];
02259 }
02260 #endif
02261 }
02262 }
02263 }
02264
02265
02266
02267
02268
02269
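/* Update the interface magnetic fields from the FARGO fluxes of B1i and B3i
 * in a constrained-transport-style difference. */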
02270 #ifdef MHD
if (pG->Nx[2]==1) ath_error("[bvals_shear]: FARGO with MHD only works in 3D\n");
02272 for (k=ks; k<=ke; k++) {
02273 for (j=js; j<=je; j++) {
02274 jj = j-js+jfs;
02275 for (i=is; i<=ie; i++) {
02276 pG->B1i[k][j][i] -=
02277 (FargoFlx[k][i][jj+1].U[NFARGO-1] - FargoFlx[k][i][jj].U[NFARGO-1]);
02278 #ifdef SHEARING_BOX
02279 pG->B2i[k][j][i] += (pG->dx2/pG->dx1)*
02280 (FargoFlx[k][i+1][jj].U[NFARGO-1] - FargoFlx[k][i][jj].U[NFARGO-1]);
02281 pG->B2i[k][j][i] -= (pG->dx2/pG->dx3)*
02282 (FargoFlx[k+1][i][jj].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02283 #endif
02284 #ifdef CYLINDRICAL
02285 pG->B2i[k][j][i] += (pG->dx2/pG->dx1)*
02286 (ri[i+1]*FargoFlx[k][i+1][jj].U[NFARGO-1] - ri[i]*FargoFlx[k][i][jj].U[NFARGO-1]);
02287 pG->B2i[k][j][i] -= (r[i]*pG->dx2/pG->dx3)*
02288 (FargoFlx[k+1][i][jj].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02289 #endif
02290 pG->B3i[k][j][i] +=
02291 (FargoFlx[k][i][jj+1].U[NFARGO-2] - FargoFlx[k][i][jj].U[NFARGO-2]);
02292 }
02293 pG->B1i[k][j][ie+1] -=
02294 (FargoFlx[k][ie+1][jj+1].U[NFARGO-1]-FargoFlx[k][ie+1][jj].U[NFARGO-1]);
02295 }
02296 for (i=is; i<=ie; i++) {
02297 #ifdef SHEARING_BOX
02298 pG->B2i[k][je+1][i] += (pG->dx2/pG->dx1)*
02299 (FargoFlx[k][i+1][jfe+1].U[NFARGO-1]-FargoFlx[k][i][jfe+1].U[NFARGO-1]);
02300 pG->B2i[k][je+1][i] -= (pG->dx2/pG->dx3)*
02301 (FargoFlx[k+1][i][jfe+1].U[NFARGO-2]-FargoFlx[k][i][jfe+1].U[NFARGO-2]);
02302 #endif
02303 #ifdef CYLINDRICAL
02304 pG->B2i[k][je+1][i] += (pG->dx2/pG->dx1)*
02305 (ri[i+1]*FargoFlx[k][i+1][jfe+1].U[NFARGO-1]-ri[i]*FargoFlx[k][i][jfe+1].U[NFARGO-1]);
02306 pG->B2i[k][je+1][i] -= (r[i]*pG->dx2/pG->dx3)*
02307 (FargoFlx[k+1][i][jfe+1].U[NFARGO-2]-FargoFlx[k][i][jfe+1].U[NFARGO-2]);
02308 #endif
02309 }
02310 }
02311 for (j=js; j<=je; j++) {
02312 jj = j-js+jfs;
02313 for (i=is; i<=ie; i++) {
02314 pG->B3i[ke+1][j][i] +=
02315 (FargoFlx[ke+1][i][jj+1].U[NFARGO-2]-FargoFlx[ke+1][i][jj].U[NFARGO-2]);
02316 }
02317 }
02318 #endif
02319
02320
02321
02322
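/* Recompute the cell-centered field components from the updated interface fields */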
02323 #ifdef MHD
02324 for (k=ks; k<=ke; k++) {
02325 for (j=js; j<=je; j++) {
02326 for(i=is; i<=ie; i++){
02327 #ifdef SHEARING_BOX
02328 pG->U[k][j][i].B1c = 0.5*(pG->B1i[k][j][i]+pG->B1i[k][j][i+1]);
02329 #endif
02330 #ifdef CYLINDRICAL
02331 pG->U[k][j][i].B1c = 0.5*(1.0/r[i])*(ri[i]*pG->B1i[k][j][i] + ri[i+1]*pG->B1i[k][j][i+1]);
02332 #endif
02333 pG->U[k][j][i].B2c = 0.5*(pG->B2i[k][j][i]+pG->B2i[k][j+1][i]);
02334 pG->U[k][j][i].B3c = 0.5*(pG->B3i[k][j][i]+pG->B3i[k+1][j][i]);
02335 }
02336 }
02337 }
02338 #endif
02339
02340 return;
02341 }
02342 #endif
02343
02344 #if defined(SHEARING_BOX) || (defined(CYLINDRICAL) && defined(FARGO))
02345
02346
02347
02348
02349
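/*----------------------------------------------------------------------------*/
/* bvals_shear_init: allocates the work arrays used by the routines in this
 * file, sized for the largest Grid on this processor, and (with FARGO)
 * computes nfghost, the number of ghost zones needed to cover the maximum
 * orbital shift per time step. */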
02350 void bvals_shear_init(MeshS *pM)
02351 {
02352 GridS *pG;
02353 int nl,nd,nx1,nx2,nx3,max1=0,max2=0,max3=0;
02354 #ifdef MPI_PARALLEL
02355 int size1=0,size2=0,size;
02356 #endif
02357 #ifdef FARGO
02358 Real xmin,xmax,x2,x3;
02359 #endif
02360 #ifdef CYLINDRICAL
02361 Real MachKep;
02362 #endif
02363
02364
02365
02366 for (nl=0; nl<(pM->NLevels); nl++){
02367 for (nd=0; nd<(pM->DomainsPerLevel[nl]); nd++){
02368 if (pM->Domain[nl][nd].Grid != NULL) {
02369 pG=pM->Domain[nl][nd].Grid;
02370
02371 nx1 = pG->Nx[0] + 2*nghost;
02372 nx2 = pG->Nx[1] + 2*nghost;
02373 nx3 = pG->Nx[2] + 2*nghost;
02374 max1 = MAX(max1,nx1);
02375 max2 = MAX(max2,nx2);
02376 max3 = MAX(max3,nx3);
02377 }
02378 }
02379 }
02380
02381
02382
02383 if((GhstZns=(Remap***)calloc_3d_array(max3,nghost,max2,sizeof(Remap)))==NULL)
02384 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02385
02386 if((GhstZnsBuf=(Remap***)calloc_3d_array(max3,nghost,max2,sizeof(Remap))) ==
02387 NULL) ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02388
02389 #ifdef MHD
02390 if ((tEyBuf=(Real**)calloc_2d_array(max3,max2,sizeof(Real))) == NULL)
02391 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02392 #endif
02393
02394
02395 #ifdef FARGO
02396 xmin = pM->RootMinX[0];
02397 xmax = pM->RootMaxX[0];
02398 #ifdef SHEARING_BOX
02399 nfghost = nghost + ((int)(1.5*CourNo*MAX(fabs(xmin),fabs(xmax))) + 1);
02400 #endif
02401 #ifdef CYLINDRICAL
02402 MachKep = MAX( xmin*(*OrbitalProfile)(xmin), xmax*(*OrbitalProfile)(xmax) )/Iso_csound;
02403 nfghost = nghost + 1 + ((int)(CourNo*MachKep));
02404 #endif
02405 max2 = max2 + 2*nfghost;
02406
02407 if((FargoVars=(FConsS***)calloc_3d_array(max3,max1,max2,sizeof(FConsS)))
02408 ==NULL) ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02409
02410 if((FargoFlx=(FConsS***)calloc_3d_array(max3,max1,max2,sizeof(FConsS)))==NULL)
02411 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02412 #endif
02413
02414 if((U = (Real*)malloc(max2*sizeof(Real))) == NULL)
02415 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02416
02417 if((Flx = (Real*)malloc(max2*sizeof(Real))) == NULL)
02418 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02419
02420 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02421 if ((Uhalf = (Real*)malloc(max2*sizeof(Real))) == NULL)
02422 ath_error("[bvals_shear_init]: malloc returned a NULL pointer\n");
02423 #endif
02424
02425
02426
02427 #ifdef MPI_PARALLEL
02428 size1 = nghost*pG->Nx[1]*(pG->Nx[2]+1)*(NREMAP+NSCALARS);
02429 #ifdef FARGO
02430 size2 = nfghost*(pG->Nx[0]+1)*(pG->Nx[2]+1)*(NFARGO+NSCALARS);
02431 #endif
02432 size = MAX(size1,size2);
02433
02434 if((send_buf = (double*)malloc(size*sizeof(double))) == NULL)
02435 ath_error("[bvals_shear_init]: Failed to allocate send buffer\n");
02436
02437 if((recv_buf = (double*)malloc(size*sizeof(double))) == NULL)
02438 ath_error("[bvals_shear_init]: Failed to allocate receive buffer\n");
02439 #endif
02440
02441 return;
02442 }
02443
02444
02445
02446
02447
02448
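/*----------------------------------------------------------------------------*/
/* bvals_shear_destruct: frees all memory allocated by bvals_shear_init(). */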
02449 void bvals_shear_destruct(void)
02450 {
02451 if (GhstZns != NULL) free_3d_array(GhstZns);
02452 if (GhstZnsBuf != NULL) free_3d_array(GhstZnsBuf);
02453 #ifdef MHD
02454 if (tEyBuf != NULL) free_2d_array(tEyBuf);
02455 #endif
02456 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02457 if (Uhalf != NULL) free(Uhalf);
02458 #endif
02459 #ifdef FARGO
02460 if (FargoVars != NULL) free_3d_array(FargoVars);
02461 if (FargoFlx != NULL) free_3d_array(FargoFlx);
02462 #endif
02463 if (U != NULL) free(U);
02464 if (Flx != NULL) free(Flx);
02465 #ifdef MPI_PARALLEL
02466 if (send_buf != NULL) free(send_buf);
02467 if (recv_buf != NULL) free(recv_buf);
02468 #endif
02469
02470 return;
02471 }
02472
02473
02474
02475
02476
02477
02478
02479
02480
02481
02482
02483
02484
02485 #if defined(SECOND_ORDER_CHAR) || defined (SECOND_ORDER_PRIM)
02486
02487
02488
02489
02490
02491
02492
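/*----------------------------------------------------------------------------*/
/* RemapFlux (second-order version): computes the upwind flux of the 1D array U
 * through the y-interfaces jinner..jouter for a fractional shift eps (in units
 * of dx2), using piecewise-linear reconstruction with a monotonized slope
 * limiter.  Compiled when one of the second-order reconstructions is selected. */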
02493 void RemapFlux(const Real *U, const Real eps,
02494 const int jinner, const int jouter, Real *Flux)
02495 {
02496 int j,jl,ju;
02497 Real dUc,dUl,dUr,dUm,lim_slope;
02498
02499
02500
02501
02502 if (eps > 0.0) {
02503 jl = jinner-1;
02504 ju = jouter-1;
02505 } else {
02506 jl = jinner;
02507 ju = jouter;
02508 }
02509
02510 for (j=jl; j<=ju; j++) {
02511 dUc = U[j+1] - U[j-1];
02512 dUl = U[j ] - U[j-1];
02513 dUr = U[j+1] - U[j ];
02514
02515 dUm = 0.0;
02516 if (dUl*dUr > 0.0) {
02517 lim_slope = MIN(fabs(dUl),fabs(dUr));
02518 dUm = SIGN(dUc)*MIN(0.5*fabs(dUc),2.0*lim_slope);
02519 }
02520
02521 if (eps > 0.0) {
02522 Flux[j+1] = eps*(U[j] + 0.5*(1.0 - eps)*dUm);
02523 } else {
02524 Flux[j ] = eps*(U[j] - 0.5*(1.0 + eps)*dUm);
02525 }
02526 }
02527
02528 return;
02529 }
02530
02531 #endif
02532
02533 #if defined(THIRD_ORDER_CHAR) || defined(THIRD_ORDER_PRIM)
02534
02535
02536
02537
02538
02539
02540
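/*----------------------------------------------------------------------------*/
/* RemapFlux (third-order version): as above, but using a piecewise-parabolic
 * (PPM-style) reconstruction with limiters applied to the second differences,
 * for use with the third-order reconstruction options. */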
02541 void RemapFlux(const Real *U, const Real eps,
02542 const int jinner, const int jouter, Real *Flux)
02543 {
02544 int j,jl,ju;
02545 Real d2Uc,d2Ul,d2Ur,d2U,d2Ulim,lim_slope,Ulv,Urv,dU,U6,qa,qb,qc,qx;
02546
02547
02548
02549
02550 if (eps > 0.0) {
02551 jl = jinner-1;
02552 ju = jouter-1;
02553 } else {
02554 jl = jinner;
02555 ju = jouter;
02556 }
02557
02558 for (j=jl; j<=ju+1; j++) {
02559 Uhalf[j]=(7.0*(U[j-1]+U[j]) - (U[j-2]+U[j+1]))/12.0;
02560 d2Uc = 3.0*(U[j-1] - 2.0*Uhalf[j] + U[j]);
02561 d2Ul = (U[j-2] - 2.0*U[j-1] + U[j ]);
02562 d2Ur = (U[j-1] - 2.0*U[j ] + U[j+1]);
02563 d2Ulim = 0.0;
02564 lim_slope = MIN(fabs(d2Ul),fabs(d2Ur));
02565 if (d2Uc > 0.0 && d2Ul > 0.0 && d2Ur > 0.0) {
02566 d2Ulim = SIGN(d2Uc)*MIN(1.25*lim_slope,fabs(d2Uc));
02567 }
02568 if (d2Uc < 0.0 && d2Ul < 0.0 && d2Ur < 0.0) {
02569 d2Ulim = SIGN(d2Uc)*MIN(1.25*lim_slope,fabs(d2Uc));
02570 }
02571 Uhalf[j] = 0.5*((U[j-1]+U[j]) - d2Ulim/3.0);
02572 }
02573
02574 for (j=jl; j<=ju; j++) {
02575 Ulv = Uhalf[j ];
02576 Urv = Uhalf[j+1];
02577
02578 qa = (Urv-U[j])*(U[j]-Ulv);
02579 qb = (U[j-1]-U[j])*(U[j]-U[j+1]);
02580 if (qa <= 0.0 && qb <= 0.0) {
02581 qc = 6.0*(U[j] - 0.5*(Ulv+Urv));
02582 d2U = -2.0*qc;
02583 d2Uc = (U[j-1] - 2.0*U[j ] + U[j+1]);
02584 d2Ul = (U[j-2] - 2.0*U[j-1] + U[j ]);
02585 d2Ur = (U[j ] - 2.0*U[j+1] + U[j+2]);
02586 d2Ulim = 0.0;
02587 lim_slope = MIN(fabs(d2Ul),fabs(d2Ur));
02588 lim_slope = MIN(fabs(d2Uc),lim_slope);
02589 if (d2Uc > 0.0 && d2Ul > 0.0 && d2Ur > 0.0 && d2U > 0.0) {
02590 d2Ulim = SIGN(d2U)*MIN(1.25*lim_slope,fabs(d2U));
02591 }
02592 if (d2Uc < 0.0 && d2Ul < 0.0 && d2Ur < 0.0 && d2U < 0.0) {
02593 d2Ulim = SIGN(d2U)*MIN(1.25*lim_slope,fabs(d2U));
02594 }
02595 if (d2U == 0.0) {
02596 Ulv = U[j];
02597 Urv = U[j];
02598 } else {
02599 Ulv = U[j] + (Ulv - U[j])*d2Ulim/d2U;
02600 Urv = U[j] + (Urv - U[j])*d2Ulim/d2U;
02601 }
02602 }
02603
02604 qa = (Urv-U[j])*(U[j]-Ulv);
02605 qb = Urv-Ulv;
02606 qc = 6.0*(U[j] - 0.5*(Ulv+Urv));
02607 if (qa <= 0.0) {
02608 Ulv = U[j];
02609 Urv = U[j];
02610 } else if ((qb*qc) > (qb*qb)) {
02611 Ulv = 3.0*U[j] - 2.0*Urv;
02612 } else if ((qb*qc) < -(qb*qb)) {
02613 Urv = 3.0*U[j] - 2.0*Ulv;
02614 }
02615
02616 dU = Urv - Ulv;
02617 U6 = 6.0*(U[j] - 0.5*(Ulv + Urv));
02618
02619 if (eps > 0.0) {
02620 qx = TWO_3RDS*eps;
02621 Flux[j+1] = eps*(Urv - 0.75*qx*(dU - (1.0 - qx)*U6));
02622
02623 } else {
02624 qx = -TWO_3RDS*eps;
02625 Flux[j ] = eps*(Ulv + 0.75*qx*(dU + (1.0 - qx)*U6));
02626 }
02627 }
02628
02629 return;
02630 }
02631
02632 #endif
02633
02634 #endif