00001 #include "../copyright.h"
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041 #include <stdio.h>
00042 #include <stdlib.h>
00043 #include <math.h>
00044 #include "../defs.h"
00045 #include "../athena.h"
00046 #include "../prototypes.h"
00047 #include "prototypes.h"
00048 #include "particle.h"
00049 #include "../globals.h"
00050
00051
00052 #ifdef PARTICLES
00053
00054
00055 #ifdef MPI_PARALLEL
00056 #define NVAR_P 10
00057 #else
00058 #define NVAR_P 9
00059 #endif
00060
00061
00062
00063
00064 static double *send_buf = NULL, *recv_buf = NULL;
00065 static long NBUF;
00066 static long send_bufsize;
00067 static long recv_bufsize;
00068 static int nbc;
00069
00070
00071 static int my_iproc, my_jproc, my_kproc;
00072
00073 static Real x1min,x1max,x2min,x2max,x3min,x3max;
00074 static Real Lx1, Lx2, Lx3;
00075 static int NShuffle;
00076
00077
00078 static VBCFun_t apply_ix1 = NULL, apply_ox1 = NULL;
00079 static VBCFun_t apply_ix2 = NULL, apply_ox2 = NULL;
00080 static VBCFun_t apply_ix3 = NULL, apply_ox3 = NULL;
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098 static void realloc_sendbuf();
00099 static void realloc_recvbuf();
00100
00101 static void update_particle_status(Grid *pG);
00102
00103 static void reflect_ix1_particle(Grid *pG);
00104 static void reflect_ox1_particle(Grid *pG);
00105 static void reflect_ix2_particle(Grid *pG);
00106 static void reflect_ox2_particle(Grid *pG);
00107 static void reflect_ix3_particle(Grid *pG);
00108 static void reflect_ox3_particle(Grid *pG);
00109
00110 static void outflow_particle(Grid *pG);
00111
00112 static void periodic_ix1_particle(Grid *pG);
00113 static void periodic_ox1_particle(Grid *pG);
00114 static void periodic_ix2_particle(Grid *pG);
00115 static void periodic_ox2_particle(Grid *pG);
00116 static void periodic_ix3_particle(Grid *pG);
00117 static void periodic_ox3_particle(Grid *pG);
00118
00119 static long packing_ix1_particle(Grid *pG, int nlayer);
00120 static long packing_ox1_particle(Grid *pG, int nlayer);
00121 static long packing_ix2_particle(Grid *pG, int nlayer);
00122 static long packing_ox2_particle(Grid *pG, int nlayer);
00123 static long packing_ix3_particle(Grid *pG, int nlayer);
00124 static long packing_ox3_particle(Grid *pG, int nlayer);
00125 static void packing_one_particle(Grain *cur, long n, short pos);
00126 static void shift_packed_particle(double *buf, long n, int index, double shift);
00127 static void unpack_particle(Grid *pG, double *buf, long n);
00128
00129 #ifdef SHEARING_BOX
00130 static void shearingbox_ix1_particle(Grid *pG, Domain *pD, long numpar);
00131 static void shearingbox_ox1_particle(Grid *pG, Domain *pD, long numpar);
00132
00133 static long packing_ix1_particle_shear(Grid *pG, int reg, long numpar);
00134 static long packing_ox1_particle_shear(Grid *pG, int reg, long numpar);
00135
00136 #ifdef FARGO
00137 static long packing_particle_fargo(Grid *pG, Real yl, Real yu);
00138 static int gridshift(Real shift);
00139 #endif
00140
00141 #endif
00142
00143
00144
00145
00146
00147
00148
00149
00150
00151
00152
00153
00154
00155
00156
00157
00158
00159
/*----------------------------------------------------------------------------*/
/* set_bvals_particle: apply particle boundary conditions along x1, x2, x3.
 *
 *   For each active direction (Nx > 1) the routine either:
 *     - exchanges crossing particles with both MPI neighbors (two passes:
 *       right-going then left-going),
 *     - mixes an MPI exchange on one side with a physical BC function
 *       (apply_ix*/apply_ox*) on the other, or
 *     - applies the physical BC functions on both sides (no MPI neighbors).
 *
 *   Each MPI exchange follows a fixed handshake whose ORDER matters:
 *     1. post Irecv for the incoming particle count,
 *     2. pack outgoing particles into send_buf (packing_*_particle),
 *     3. Send our count,
 *     4. if this grid sits on the domain edge, shift the packed coordinates
 *        by the domain size (periodic wrap; plus velocity shear correction
 *        for a non-FARGO shearing box),
 *     5. wait for the incoming count, grow recv_buf if needed,
 *     6. post Irecv for the particle data, Send ours, Wait, unpack.
 *
 *   Afterwards (SHEARING_BOX, 3D) the azimuthal shift of particles that
 *   crossed the radial boundaries is handled, and finally the crossing
 *   status flag of every particle is updated.
 *
 *   NOTE(review): several ath_error tags are copy-paste artifacts (e.g.
 *   "[send_ix1_particle]" appears inside the x2 exchange below); they only
 *   affect diagnostic text, not behavior.
 */
void set_bvals_particle(Grid *pG, Domain *pD)
{
#ifdef MPI_PARALLEL
  int err;                   /* MPI return code */
  long cnt_send, cnt_recv;   /* number of particles packed / expected */
  MPI_Request rq;
  MPI_Status stat;
#endif
#ifdef SHEARING_BOX
  long numpar;               /* particle count BEFORE the x1 exchange */
  int vyind;                 /* buffer index of the azimuthal velocity:
                              * 5 in 3D, 6 in 2D (packing layout dependent) */
#endif

  /* Periodically re-order the particle list every NShuffle steps
   * (fmod(nstep,NShuffle)<0.1 is an integer "nstep % NShuffle == 0" test). */
  if ((NShuffle>0) && (fmod(pG->nstep, NShuffle)<0.1))
    shuffle(pG);

  /*====================== x1 direction ======================*/
  if (pG->Nx1 > 1){

#ifdef SHEARING_BOX
    /* remember the pre-exchange particle count; grab the buffer slot of vy */
    numpar = pG->nparticle;
    if (pG->Nx3 > 1)
      vyind = 5;
    else
      vyind = 6;
#endif

#ifdef MPI_PARALLEL

    /*--- MPI neighbors on BOTH x1 sides ---*/
    if (pG->rx1_id >= 0 && pG->lx1_id >= 0) {

      /* pass 1: send right, receive from left */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx1_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      /* pack particles leaving through the outer-x1 ghost layers */
      cnt_send = packing_ox1_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx1_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);

      if (my_iproc == pD->NGrid_x1-1) {
        /* rightmost grid: wrap x1 coordinate across the domain */
        shift_packed_particle(send_buf, cnt_send, 1, -Lx1);
#ifdef SHEARING_BOX
#ifndef FARGO
        /* non-FARGO shearing box: add the shear velocity jump at the wrap */
        shift_packed_particle(send_buf, cnt_send, vyind, vshear);
#endif
#endif
      }

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

      /* grow the receive buffer if the incoming batch will not fit */
      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx1_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx1_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

        /* append the received particles to this grid's list */
        unpack_particle(pG, recv_buf, cnt_recv);
      }

      /* pass 2: send left, receive from right */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx1_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      /* pack particles leaving through the inner-x1 ghost layers */
      cnt_send = packing_ix1_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx1_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);

      if (my_iproc == 0) {
        /* leftmost grid: wrap x1 coordinate across the domain */
        shift_packed_particle(send_buf, cnt_send, 1, Lx1);
#ifdef SHEARING_BOX
#ifndef FARGO
        /* undo the shear velocity jump in the opposite direction */
        shift_packed_particle(send_buf, cnt_send, vyind, -vshear);
#endif
#endif
      }

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx1_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx1_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox1_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox1_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the RIGHT only; physical BC on the left ---*/
    if ((pG->rx1_id >= 0) && (pG->lx1_id < 0)) {

      /* send first (count then data), so the partner can receive */
      cnt_send = packing_ox1_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx1_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx1_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
      }

      /* physical boundary condition on the inner x1 edge */
      (*apply_ix1)(pG);

      /* then receive count and data back from the right neighbor */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx1_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx1_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox1_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the LEFT only; physical BC on the right ---*/
    if ((pG->lx1_id >= 0) && (pG->rx1_id < 0)) {

      /* receive first (mirror of the block above) */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx1_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx1_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      /* physical boundary condition on the outer x1 edge */
      (*apply_ox1)(pG);

      /* then send our left-going particles */
      cnt_send = packing_ix1_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx1_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx1_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox1_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix1_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }
#endif /* MPI_PARALLEL */

    /*--- no MPI neighbor on either side: physical BCs on both edges ---*/
    if (pG->rx1_id < 0 && pG->lx1_id < 0) {
      (*apply_ix1)(pG);
      (*apply_ox1)(pG);
    }

#ifdef SHEARING_BOX
    /* 3D shearing box: remap particles that crossed the radial edges */
    if (pG->Nx3>1) {
      if (my_iproc == 0)
        shearingbox_ix1_particle(pG, pD, numpar);

      if (my_iproc == (pD->NGrid_x1-1))
        shearingbox_ox1_particle(pG, pD, numpar);
    }
#endif

  }

  /*====================== x2 direction ======================*/
  /* same handshake as x1, with index 2 / Lx2 shifts and no shear terms */
  if (pG->Nx2 > 1) {

#ifdef MPI_PARALLEL

    /*--- MPI neighbors on BOTH x2 sides ---*/
    if (pG->rx2_id >= 0 && pG->lx2_id >= 0) {

      /* pass 1: send right, receive from left */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx2_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      cnt_send = packing_ox2_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx2_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);

      /* topmost grid in x2: wrap the x2 coordinate */
      if (my_jproc == pD->NGrid_x2-1)
        shift_packed_particle(send_buf, cnt_send, 2, -Lx2);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx2_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx2_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }

      /* pass 2: send left, receive from right */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx2_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      cnt_send = packing_ix2_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx2_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);

      /* bottom grid in x2: wrap the x2 coordinate */
      if (my_jproc == 0)
        shift_packed_particle(send_buf, cnt_send, 2, Lx2);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx2_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx2_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox2_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox2_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the RIGHT (x2) only; physical BC on the left ---*/
    if (pG->rx2_id >= 0 && pG->lx2_id < 0) {

      cnt_send = packing_ox2_particle(pG, nghost);

      /* NOTE(review): tag says ix1 but this is the x2 exchange (copy-paste) */
      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx2_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx2_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix1_particle]: MPI_Send error = %d\n",err);
      }

      /* physical boundary condition on the inner x2 edge */
      (*apply_ix2)(pG);

      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx2_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx2_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox2_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the LEFT (x2) only; physical BC on the right ---*/
    if (pG->rx2_id < 0 && pG->lx2_id >= 0) {

      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx2_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();
      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx2_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      /* physical boundary condition on the outer x2 edge */
      (*apply_ox2)(pG);

      cnt_send = packing_ix2_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx2_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix2_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx2_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox2_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix2_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }
#endif /* MPI_PARALLEL */

    /*--- no MPI neighbor on either x2 side: physical BCs on both edges ---*/
    if (pG->rx2_id < 0 && pG->lx2_id < 0) {
      (*apply_ix2)(pG);
      (*apply_ox2)(pG);
    }
  }

  /*====================== x3 direction ======================*/
  /* same handshake again, with index 3 / Lx3 shifts */
  if (pG->Nx3 > 1){

#ifdef MPI_PARALLEL

    /*--- MPI neighbors on BOTH x3 sides ---*/
    if (pG->rx3_id >= 0 && pG->lx3_id >= 0) {

      /* pass 1: send right, receive from left */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx3_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      cnt_send = packing_ox3_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx3_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);

      /* topmost grid in x3: wrap the x3 coordinate */
      if (my_kproc == pD->NGrid_x3-1)
        shift_packed_particle(send_buf, cnt_send, 3, -Lx3);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx3_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx3_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }

      /* pass 2: send left, receive from right */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx3_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      cnt_send = packing_ix3_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx3_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);

      /* bottom grid in x3: wrap the x3 coordinate */
      if (my_kproc == 0)
        shift_packed_particle(send_buf, cnt_send, 3, Lx3);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx3_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx3_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox3_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox3_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the RIGHT (x3) only; physical BC on the left ---*/
    if (pG->rx3_id >= 0 && pG->lx3_id < 0) {

      cnt_send = packing_ox3_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->rx3_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->rx3_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);
      }

      /* physical boundary condition on the inner x3 edge */
      (*apply_ix3)(pG);

      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->rx3_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->rx3_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ox3_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }

    /*--- MPI neighbor on the LEFT (x3) only; physical BC on the right ---*/
    if (pG->rx3_id < 0 && pG->lx3_id >= 0) {

      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, pG->lx3_id,
                      boundary_particle_tag, MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, pG->lx3_id,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      /* physical boundary condition on the outer x3 edge */
      (*apply_ox3)(pG);

      cnt_send = packing_ix3_particle(pG, nghost);

      err = MPI_Send(&cnt_send, 1, MPI_LONG, pG->lx3_id,
                     boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ix3_particle]: MPI_Send error = %d\n",err);

      if (cnt_send > 0) {
        err = MPI_Send(send_buf, cnt_send*NVAR_P, MPI_DOUBLE, pG->lx3_id,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox3_particle]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[receive_ix3_particle]: MPI_Wait error = %d\n",err);

        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }
#endif /* MPI_PARALLEL */

    /*--- no MPI neighbor on either x3 side: physical BCs on both edges ---*/
    if (pG->rx3_id < 0 && pG->lx3_id < 0) {
      (*apply_ix3)(pG);
      (*apply_ox3)(pG);
    }
  }

  /* refresh each particle's crossing/ownership flag after the exchanges */
  update_particle_status(pG);

  return;
}
00916
00917
00918 #ifdef FARGO
00919
00920
00921
/*----------------------------------------------------------------------------*/
/* advect_particles: FARGO-only azimuthal (x2) advection of particles.
 *
 *   Applies each particle's stored orbital shift (pG->parsub[p].shift) to its
 *   x2 coordinate, wrapping periodically into [x2min, x2min+Lx2).  With MPI,
 *   particles whose shift carries them off this grid are then exchanged with
 *   the grids up to ishl..ishu rows away in the x2 processor column (same
 *   handshake as set_bvals_particle: count, pack, data, unpack).
 */
void advect_particles(Grid *pG, Domain *pD)
{
  Grain *cur;              /* particle being shifted */
  long p;
  Real x1l, x1u;           /* x1 extent of this grid's active zones */
#ifdef MPI_PARALLEL
  long cnt_recv, n;        /* received / packed particle counts */
  int ishl, ishu, i;       /* range of grid-row offsets to exchange with */
  int inds, indr, ids, idr;/* send/recv row indices and process ids */
  Real x2len, yl, yu;      /* x2 size of one grid; y-range owned by target */
  int err;
  MPI_Request rq;
  MPI_Status stat;
#endif

  /* x1 coordinates of the inner and outer active-zone edges */
  x1l = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
  x1u = pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1;

  /* apply the orbital shift with periodic wrap in x2
   * (+Lx2 keeps the fmod argument non-negative for negative shifts) */
  for (p=0; p<pG->nparticle; p++) {
    cur = &(pG->particle[p]);
    cur->x2 = x2min + fmod(cur->x2 + pG->parsub[p].shift - x2min + Lx2, Lx2);
  }

#ifdef MPI_PARALLEL
  /* how many grid rows (in x2) the largest/smallest shift can cross;
   * padded by one cell on each side */
  x2len = pG->dx2*pG->Nx2;
  ishl = gridshift((qshear*Omega_0*(x1l-pG->dx1)*pG->dt - pG->dx2)/x2len);
  ishu = gridshift((qshear*Omega_0*(x1u+pG->dx1)*pG->dt + pG->dx2)/x2len);

  /* exchange with every non-zero row offset in [ishl, ishu] */
  for (i=ishl; i<=ishu; i++)
    if (i != 0) {
      /* destination row (periodic wrap) and its process id */
      inds = my_jproc + i;
      if (inds < 0) inds += pD->NGrid_x2;
      if (inds > (pD->NGrid_x2-1)) inds -= pD->NGrid_x2;
      ids = pD->GridArray[my_kproc][inds][my_iproc].id;

      /* source row (the symmetric partner) and its process id */
      indr = my_jproc - i;
      if (indr < 0) indr += pD->NGrid_x2;
      if (indr > (pD->NGrid_x2-1)) indr -= pD->NGrid_x2;
      idr = pD->GridArray[my_kproc][indr][my_iproc].id;

      /* post receive for the incoming particle count */
      err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, idr, boundary_particle_tag,
                      MPI_COMM_WORLD, &rq);
      if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

      /* pack particles whose shifted x2 lies in the destination's range */
      yl = x2min + inds*x2len; yu = x2min + (inds+1)*x2len;
      n = packing_particle_fargo(pG, yl, yu);

      /* send our count */
      err = MPI_Send(&n, 1, MPI_LONG, ids, boundary_particle_tag, MPI_COMM_WORLD);
      if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);

      err = MPI_Wait(&rq, &stat);
      if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);

      /* grow the receive buffer if the incoming batch will not fit */
      if ((cnt_recv+1) >= recv_bufsize)
        realloc_recvbuf();

      if (cnt_recv > 0) {
        err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, idr,
                        boundary_particle_tag, MPI_COMM_WORLD, &rq);
        if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
      }

      if (n > 0) {
        err = MPI_Send(send_buf, n*NVAR_P, MPI_DOUBLE, ids,
                       boundary_particle_tag, MPI_COMM_WORLD);
        if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
      }

      if (cnt_recv > 0) {
        err = MPI_Wait(&rq, &stat);
        if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);

        /* append the received particles to this grid's list */
        unpack_particle(pG, recv_buf, cnt_recv);
      }
    }
#endif /* MPI_PARALLEL */

  return;
}
01016 #endif
01017
01018
01019
01020
01021
01022
01023 void set_bvals_particle_init(Grid *pG, Domain *pD)
01024 {
01025 int ibc_x1, obc_x1;
01026 int ibc_x2, obc_x2;
01027 int ibc_x3, obc_x3;
01028 #ifdef MPI_PARALLEL
01029 int ib,jb,kb;
01030 int my_id = pG->my_id;
01031 #endif
01032
01033
01034 NBUF = (long)(0.15*pG->arrsize);
01035
01036 send_bufsize = NBUF;
01037 recv_bufsize = NBUF;
01038 send_buf = (double*)calloc_1d_array(NVAR_P*send_bufsize, sizeof(double));
01039 recv_buf = (double*)calloc_1d_array(NVAR_P*recv_bufsize, sizeof(double));
01040
01041
01042 #ifdef FEEDBACK
01043 nbc = 4;
01044 #else
01045 nbc = 1;
01046 #endif
01047
01048
01049 x1min = par_getd("grid","x1min");
01050 x1max = par_getd("grid","x1max");
01051 Lx1 = x1max - x1min;
01052
01053 x2min = par_getd("grid","x2min");
01054 x2max = par_getd("grid","x2max");
01055 Lx2 = x2max - x2min;
01056
01057 x3min = par_getd("grid","x3min");
01058 x3max = par_getd("grid","x3max");
01059 Lx3 = x3max - x3min;
01060
01061 get_myGridIndex(pD, pG->my_id, &my_iproc, &my_jproc, &my_kproc);
01062
01063
01064 NShuffle = par_geti_def("particle","nshuf",0);
01065
01066 #ifdef SHEARING_BOX
01067
01068 vshear = qshear * Omega_0 * Lx1;
01069 #endif
01070
01071
01072
01073 if(pG->Nx1 > 1) {
01074 if(apply_ix1 == NULL){
01075
01076 ibc_x1 = par_geti("grid","ibc_x1");
01077 switch(ibc_x1){
01078
01079 case 1:
01080 apply_ix1 = reflect_ix1_particle;
01081 break;
01082 case 5:
01083 apply_ix1 = reflect_ix1_particle;
01084 break;
01085
01086 case 2:
01087 apply_ix1 = outflow_particle;
01088 break;
01089
01090 case 4:
01091 apply_ix1 = periodic_ix1_particle;
01092 #ifdef MPI_PARALLEL
01093 if(pG->lx1_id < 0 && pD->NGrid_x1 > 1){
01094 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01095 pG->lx1_id = pD->GridArray[kb][jb][pD->NGrid_x1-1].id;
01096 }
01097 #endif
01098 break;
01099
01100 default:
01101 ath_perr(-1,"[set_bvals_particle_init]: ibc_x1 = %d unknown\n",ibc_x1);
01102 exit(EXIT_FAILURE);
01103 }
01104
01105 }
01106
01107 if(apply_ox1 == NULL){
01108
01109 obc_x1 = par_geti("grid","obc_x1");
01110 switch(obc_x1){
01111
01112 case 1:
01113 apply_ox1 = reflect_ox1_particle;
01114 break;
01115 case 5:
01116 apply_ox1 = reflect_ox1_particle;
01117 break;
01118
01119 case 2:
01120 apply_ox1 = outflow_particle;
01121 break;
01122
01123 case 4:
01124 apply_ox1 = periodic_ox1_particle;
01125 #ifdef MPI_PARALLEL
01126 if(pG->rx1_id < 0 && pD->NGrid_x1 > 1){
01127 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01128 pG->rx1_id = pD->GridArray[kb][jb][0].id;
01129 }
01130 #endif
01131 break;
01132
01133 default:
01134 ath_perr(-1,"[set_bvals_particle_init]: obc_x1 = %d unknown\n",obc_x1);
01135 exit(EXIT_FAILURE);
01136 }
01137
01138 }
01139 }
01140
01141
01142
01143 if(pG->Nx2 > 1) {
01144 if(apply_ix2 == NULL){
01145
01146 ibc_x2 = par_geti("grid","ibc_x2");
01147 switch(ibc_x2){
01148
01149 case 1:
01150 apply_ix2 = reflect_ix2_particle;
01151 break;
01152 case 5:
01153 apply_ix2 = reflect_ix2_particle;
01154 break;
01155
01156 case 2:
01157 apply_ix2 = outflow_particle;
01158 break;
01159
01160 case 4:
01161 apply_ix2 = periodic_ix2_particle;
01162 #ifdef MPI_PARALLEL
01163 if(pG->lx2_id < 0 && pD->NGrid_x2 > 1){
01164 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01165 pG->lx2_id = pD->GridArray[kb][pD->NGrid_x2-1][ib].id;
01166 }
01167 #endif
01168 break;
01169
01170 default:
01171 ath_perr(-1,"[set_bvals_particle_init]: ibc_x2 = %d unknown\n",ibc_x2);
01172 exit(EXIT_FAILURE);
01173 }
01174
01175 }
01176
01177 if(apply_ox2 == NULL){
01178
01179 obc_x2 = par_geti("grid","obc_x2");
01180 switch(obc_x2){
01181
01182 case 1:
01183 apply_ox2 = reflect_ox2_particle;
01184 break;
01185 case 5:
01186 apply_ox2 = reflect_ox2_particle;
01187 break;
01188
01189 case 2:
01190 apply_ox2 = outflow_particle;
01191 break;
01192
01193 case 4:
01194 apply_ox2 = periodic_ox2_particle;
01195 #ifdef MPI_PARALLEL
01196 if(pG->rx2_id < 0 && pD->NGrid_x2 > 1){
01197 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01198 pG->rx2_id = pD->GridArray[kb][0][ib].id;
01199 }
01200 #endif
01201 break;
01202
01203 default:
01204 ath_perr(-1,"[set_bvals_particle_init]: obc_x2 = %d unknown\n",obc_x2);
01205 exit(EXIT_FAILURE);
01206 }
01207
01208 }
01209 }
01210
01211
01212
01213 if(pG->Nx3 > 1) {
01214 if(apply_ix3 == NULL){
01215
01216 ibc_x3 = par_geti("grid","ibc_x3");
01217 switch(ibc_x3){
01218
01219 case 1:
01220 apply_ix3 = reflect_ix3_particle;
01221 break;
01222 case 5:
01223 apply_ix3 = reflect_ix3_particle;
01224 break;
01225
01226 case 2:
01227 apply_ix3 = outflow_particle;
01228 break;
01229
01230 case 4:
01231 apply_ix3 = periodic_ix3_particle;
01232 #ifdef MPI_PARALLEL
01233 if(pG->lx3_id < 0 && pD->NGrid_x3 > 1){
01234 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01235 pG->lx3_id = pD->GridArray[pD->NGrid_x3-1][jb][ib].id;
01236 }
01237 #endif
01238 break;
01239
01240 default:
01241 ath_perr(-1,"[set_bvals_particle_init]: ibc_x3 = %d unknown\n",ibc_x3);
01242 exit(EXIT_FAILURE);
01243 }
01244
01245 }
01246
01247 if(apply_ox3 == NULL){
01248
01249 obc_x3 = par_geti("grid","obc_x3");
01250 switch(obc_x3){
01251
01252 case 1:
01253 apply_ox3 = reflect_ox3_particle;
01254 break;
01255 case 5:
01256 apply_ox3 = reflect_ox3_particle;
01257 break;
01258
01259 case 2:
01260 apply_ox3 = outflow_particle;
01261 break;
01262
01263 case 4:
01264 apply_ox3 = periodic_ox3_particle;
01265 #ifdef MPI_PARALLEL
01266 if(pG->rx3_id < 0 && pD->NGrid_x3 > 1){
01267 get_myGridIndex(pD, my_id, &ib, &jb, &kb);
01268 pG->rx3_id = pD->GridArray[0][jb][ib].id;
01269 }
01270 #endif
01271 break;
01272
01273 default:
01274 ath_perr(-1,"[set_bvals_particle_init]: obc_x3 = %d unknown\n",obc_x3);
01275 exit(EXIT_FAILURE);
01276 }
01277
01278 }
01279 }
01280
01281 return;
01282 }
01283
01284
01285
01286
01287
01288
01289 void set_bvals_particle_fun(enum Direction dir, VBCFun_t prob_bc)
01290 {
01291 switch(dir){
01292 case left_x1:
01293 apply_ix1 = prob_bc;
01294 break;
01295 case right_x1:
01296 apply_ox1 = prob_bc;
01297 break;
01298 case left_x2:
01299 apply_ix2 = prob_bc;
01300 break;
01301 case right_x2:
01302 apply_ox2 = prob_bc;
01303 break;
01304 case left_x3:
01305 apply_ix3 = prob_bc;
01306 break;
01307 case right_x3:
01308 apply_ox3 = prob_bc;
01309 break;
01310 default:
01311 ath_perr(-1,"[set_bvals_particle_fun]: Unknown direction = %d\n",dir);
01312 exit(EXIT_FAILURE);
01313 }
01314 return;
01315 }
01316
01317
01318
01319 void set_bvals_particle_destruct(Grid *pG, Domain *pD)
01320 {
01321 apply_ix1 = NULL;
01322 apply_ox1 = NULL;
01323 apply_ix2 = NULL;
01324 apply_ox2 = NULL;
01325 apply_ix3 = NULL;
01326 apply_ox3 = NULL;
01327 free(send_buf);
01328 free(recv_buf);
01329 send_bufsize = 0;
01330 recv_bufsize = 0;
01331 return;
01332 }
01333
01334
01335
01336
01337
01338
01339
01340
01341
01342
01343
01344
01345
01346
01347
01348
01349
01350
01351
01352 static void realloc_sendbuf()
01353 {
01354 send_bufsize += NBUF;
01355 ath_pout(1,"[set_bvals_prticles]: reallocating send buffer...");
01356 if ((send_buf = (double*)realloc(send_buf,
01357 NVAR_P*(send_bufsize)*sizeof(double))) == NULL)
01358 ath_error("[set_bvals_prticles]: failed to allocate memory for buffer.\n");
01359
01360 return;
01361 }
01362
01363
01364
01365
01366 static void realloc_recvbuf()
01367 {
01368 recv_bufsize += NBUF;
01369 ath_pout(1,"[set_bvals_prticles]: reallocating receive buffer...");
01370 if ((recv_buf = (double*)realloc(recv_buf,
01371 NVAR_P*(recv_bufsize)*sizeof(double))) == NULL)
01372 ath_error("[set_bvals_prticles]: failed to allocate memory for buffer.\n");
01373
01374 return;
01375 }
01376
01377
01378
01379
01380
01381 static void update_particle_status(Grid *pG)
01382 {
01383 long p;
01384 Grain *cur;
01385
01386 for (p=0; p<pG->nparticle; p++) {
01387 cur = &(pG->particle[p]);
01388 if (cur->pos >= 10)
01389 {
01390 if ((cur->x1>=x1upar) || (cur->x1<x1lpar) || (cur->x2>=x2upar) ||
01391 (cur->x2<x2lpar) || (cur->x3>=x3upar) || (cur->x3<x3lpar))
01392 cur->pos = 0;
01393
01394 else
01395 cur->pos = 1;
01396 }
01397 }
01398
01399 return;
01400 }
01401
01402
01403
01404
01405 static void reflect_ix1_particle(Grid *pG)
01406 {
01407 Real x1b2;
01408 long n, n0, p;
01409
01410
01411 x1b2 = 2.0*(pG->x1_0 + (pG->is + pG->idisp)*pG->dx1);
01412
01413
01414 n = packing_ix1_particle(pG, nghost);
01415
01416
01417 n0 = pG->nparticle;
01418
01419
01420 unpack_particle(pG, send_buf, n);
01421
01422
01423 for (p=n0; p<pG->nparticle; p++)
01424 {
01425 pG->particle[p].x1 = x1b2 - pG->particle[p].x1;
01426 pG->particle[p].v1 = -pG->particle[p].v1;
01427 }
01428
01429 return;
01430 }
01431
01432
01433
01434
01435
01436 static void reflect_ox1_particle(Grid *pG)
01437 {
01438 Real x1b2;
01439 long n, n0, p;
01440
01441
01442 x1b2 = 2.0*(pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1);
01443
01444
01445 n = packing_ox1_particle(pG, nghost);
01446
01447
01448 n0 = pG->nparticle;
01449
01450
01451 unpack_particle(pG, send_buf, n);
01452
01453
01454 for (p=n0; p<pG->nparticle; p++)
01455 {
01456 pG->particle[p].x1 = x1b2 - pG->particle[p].x1;
01457 pG->particle[p].v1 = -pG->particle[p].v1;
01458 }
01459
01460 return;
01461 }
01462
01463
01464
01465
01466
01467 static void reflect_ix2_particle(Grid *pG)
01468 {
01469 Real x2b2;
01470 long n, n0, p;
01471
01472
01473 x2b2 = 2.0*(pG->x2_0 + (pG->js + pG->jdisp)*pG->dx2);
01474
01475
01476 n = packing_ix2_particle(pG, nghost);
01477
01478
01479 n0 = pG->nparticle;
01480
01481
01482 unpack_particle(pG, send_buf, n);
01483
01484
01485 for (p=n0; p<pG->nparticle; p++)
01486 {
01487 pG->particle[p].x2 = x2b2 - pG->particle[p].x2;
01488 pG->particle[p].v2 = -pG->particle[p].v2;
01489 }
01490
01491 return;
01492 }
01493
01494
01495
01496
01497
01498 static void reflect_ox2_particle(Grid *pG)
01499 {
01500 Real x2b2;
01501 long n, n0, p;
01502
01503
01504 x2b2 = 2.0*(pG->x2_0 + (pG->je+1 + pG->jdisp)*pG->dx2);
01505
01506
01507 n = packing_ox2_particle(pG, nghost);
01508
01509
01510 n0 = pG->nparticle;
01511
01512
01513 unpack_particle(pG, send_buf, n);
01514
01515
01516 for (p=n0; p<pG->nparticle; p++)
01517 {
01518 pG->particle[p].x2 = x2b2 - pG->particle[p].x2;
01519 pG->particle[p].v2 = -pG->particle[p].v2;
01520 }
01521
01522 return;
01523 }
01524
01525
01526
01527
01528
01529 static void reflect_ix3_particle(Grid *pG)
01530 {
01531 Real x3b2;
01532 long n, n0, p;
01533
01534
01535 x3b2 = 2.0*(pG->x3_0 + (pG->ks + pG->kdisp)*pG->dx3);
01536
01537
01538 n = packing_ix3_particle(pG, nghost);
01539
01540
01541 n0 = pG->nparticle;
01542
01543
01544 unpack_particle(pG, send_buf, n);
01545
01546
01547 for (p=n0; p<pG->nparticle; p++)
01548 {
01549 pG->particle[p].x3 = x3b2 - pG->particle[p].x3;
01550 pG->particle[p].v3 = -pG->particle[p].v3;
01551 }
01552
01553 return;
01554 }
01555
01556
01557
01558
01559
01560 static void reflect_ox3_particle(Grid *pG)
01561 {
01562 Real x3b2;
01563 long n, n0, p;
01564
01565
01566 x3b2 = 2.0*(pG->x3_0 + (pG->ke+1 + pG->kdisp)*pG->dx3);
01567
01568
01569 n = packing_ox3_particle(pG, nghost);
01570
01571
01572 n0 = pG->nparticle;
01573
01574
01575 unpack_particle(pG, send_buf, n);
01576
01577
01578
01579 for (p=n0; p<pG->nparticle; p++)
01580 {
01581 pG->particle[p].x3 = x3b2 - pG->particle[p].x3;
01582 pG->particle[p].v3 = -pG->particle[p].v3;
01583 }
01584
01585 return;
01586 }
01587
01588
01589
01590
01591
01592
01593
01594
01595
/* Outflow boundary condition for particles: intentionally a no-op.
 * NOTE(review): particles that leave the grid are presumably discarded
 * elsewhere (e.g. by the particle integrator) -- confirm against callers.
 */
static void outflow_particle(Grid *pG)
{
  return;
}
01600
01601
01602
01603
01604
01605
01606
01607 static void periodic_ix1_particle(Grid *pG)
01608 {
01609 long n = 0;
01610 #ifdef SHEARING_BOX
01611
01612 int vyind;
01613
01614 if (pG->Nx3 > 1)
01615 vyind = 5;
01616 else
01617 vyind = 6;
01618 #endif
01619
01620
01621 n = packing_ox1_particle(pG, nghost);
01622
01623
01624 shift_packed_particle(send_buf, n, 1, -Lx1);
01625
01626 #ifdef SHEARING_BOX
01627 #ifndef FARGO
01628
01629 shift_packed_particle(send_buf, n, vyind, vshear);
01630 #endif
01631 #endif
01632
01633
01634 unpack_particle(pG, send_buf, n);
01635
01636 return;
01637 }
01638
01639
01640
01641
01642
01643
01644
01645 static void periodic_ox1_particle(Grid *pG)
01646 {
01647 long n = 0;
01648 #ifdef SHEARING_BOX
01649
01650 int vyind;
01651
01652 if (pG->Nx3 > 1)
01653 vyind = 5;
01654 else
01655 vyind = 6;
01656 #endif
01657
01658
01659 n = packing_ix1_particle(pG, nghost);
01660
01661
01662 shift_packed_particle(send_buf, n, 1, Lx1);
01663
01664 #ifdef SHEARING_BOX
01665 #ifndef FARGO
01666
01667 shift_packed_particle(send_buf, n, vyind, -vshear);
01668 #endif
01669 #endif
01670
01671
01672 unpack_particle(pG, send_buf, n);
01673
01674 return;
01675 }
01676
01677
01678
01679
01680
01681 static void periodic_ix2_particle(Grid *pG)
01682 {
01683 long n = 0;
01684
01685
01686 n = packing_ox2_particle(pG, nghost);
01687
01688
01689 shift_packed_particle(send_buf, n, 2, -Lx2);
01690
01691
01692 unpack_particle(pG, send_buf, n);
01693
01694 return;
01695 }
01696
01697
01698
01699
01700
01701 static void periodic_ox2_particle(Grid *pG)
01702 {
01703 long n = 0;
01704
01705
01706 n = packing_ix2_particle(pG, nghost);
01707
01708
01709 shift_packed_particle(send_buf, n, 2, Lx2);
01710
01711
01712 unpack_particle(pG, send_buf, n);
01713
01714 return;
01715 }
01716
01717
01718
01719
01720
01721 static void periodic_ix3_particle(Grid *pG)
01722 {
01723 long n = 0;
01724
01725
01726 n = packing_ox3_particle(pG, nghost);
01727
01728
01729 shift_packed_particle(send_buf, n, 3, -Lx3);
01730
01731
01732 unpack_particle(pG, send_buf, n);
01733
01734 return;
01735 }
01736
01737
01738
01739
01740
01741 static void periodic_ox3_particle(Grid *pG)
01742 {
01743 long n = 0;
01744
01745
01746 n = packing_ix3_particle(pG, nghost);
01747
01748
01749 shift_packed_particle(send_buf, n, 3, Lx3);
01750
01751
01752 unpack_particle(pG, send_buf, n);
01753
01754 return;
01755 }
01756
01757
01758
01759
01760
01761
01762
01763
01764
01765
01766
01767 static long packing_ix1_particle(Grid *pG, int nlayer)
01768 {
01769 Grain *cur;
01770 Real x1l,x1u;
01771 long p, n = 0;
01772 double *pd = send_buf;
01773
01774
01775 x1l = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
01776 x1u = pG->x1_0 + (pG->is+nlayer + pG->idisp)*pG->dx1;
01777
01778
01779 for (p=0; p<pG->nparticle; p++) {
01780 cur = &(pG->particle[p]);
01781
01782 if (cur->x1 < x1u) {
01783 if ((cur->pos == 0) || (cur->pos == 1))
01784 {
01785 if (cur->x1 >= x1l) {
01786 packing_one_particle(cur, n, 0);
01787 n += 1;
01788 }
01789 }
01790
01791 else if (cur->pos != 21)
01792 {
01793
01794 packing_one_particle(cur, n, 11);
01795
01796 n += 1;
01797 }
01798 }
01799 }
01800
01801 return n;
01802 }
01803
01804
01805
01806
01807
01808
01809
01810
01811
01812
01813
01814 static long packing_ox1_particle(Grid *pG, int nlayer)
01815 {
01816 Grain *cur;
01817 Real x1l,x1u;
01818 long p, n = 0;
01819 double *pd = send_buf;
01820
01821
01822 x1l = pG->x1_0 + (pG->ie-nlayer+1 + pG->idisp)*pG->dx1;
01823 x1u = pG->x1_0 + (pG->ie+1 + pG->idisp)*pG->dx1;
01824
01825
01826 for (p=0; p<pG->nparticle; p++) {
01827 cur = &(pG->particle[p]);
01828
01829 if (cur->x1 >= x1l) {
01830 if ((cur->pos == 0) || (cur->pos == 1))
01831 {
01832 if (cur->x1 < x1u) {
01833 packing_one_particle(cur, n, 0);
01834 n += 1;
01835 }
01836 }
01837 else if (cur->pos != 11)
01838 {
01839
01840 packing_one_particle(cur, n, 21);
01841 n += 1;
01842 }
01843 }
01844 }
01845
01846 return n;
01847 }
01848
01849
01850
01851
01852
01853
01854
01855
01856
01857
01858
01859 static long packing_ix2_particle(Grid *pG, int nlayer)
01860 {
01861 Grain *cur;
01862 Real x2l,x2u;
01863 long p, n = 0;
01864 double *pd = send_buf;
01865
01866
01867 x2l = pG->x2_0 + (pG->js + pG->jdisp)*pG->dx2;
01868 x2u = pG->x2_0 + (pG->js+nlayer + pG->jdisp)*pG->dx2;
01869
01870
01871 for (p=0; p<pG->nparticle; p++) {
01872 cur = &(pG->particle[p]);
01873
01874 if (cur->x2 < x2u) {
01875 if ((cur->pos == 0) || (cur->pos == 1))
01876 {
01877 if (cur->x2 >= x2l) {
01878 packing_one_particle(cur, n, 0);
01879 n += 1;
01880 }
01881 }
01882 else if (cur->pos != 22)
01883 {
01884
01885 packing_one_particle(cur, n, 12);
01886 n += 1;
01887 }
01888 }
01889 }
01890
01891 return n;
01892 }
01893
01894
01895
01896
01897
01898
01899
01900
01901
01902
01903
01904 static long packing_ox2_particle(Grid *pG, int nlayer)
01905 {
01906 Grain *cur;
01907 Real x2l,x2u;
01908 long p, n = 0;
01909 double *pd = send_buf;
01910
01911
01912 x2l = pG->x2_0 + (pG->je-nlayer+1 + pG->jdisp)*pG->dx2;
01913 x2u = pG->x2_0 + (pG->je+1 + pG->jdisp)*pG->dx2;
01914
01915
01916 for (p=0; p<pG->nparticle; p++) {
01917 cur = &(pG->particle[p]);
01918
01919 if (cur->x2 >= x2l) {
01920 if ((cur->pos == 0) || (cur->pos == 1))
01921 {
01922 if (cur->x2 < x2u) {
01923 packing_one_particle(cur, n, 0);
01924 n += 1;
01925 }
01926 }
01927 else if (cur->pos != 12)
01928 {
01929
01930 packing_one_particle(cur, n, 22);
01931 n += 1;
01932 }
01933 }
01934 }
01935
01936 return n;
01937 }
01938
01939
01940
01941
01942
01943
01944
01945
01946
01947
01948
01949 static long packing_ix3_particle(Grid *pG, int nlayer)
01950 {
01951 Grain *cur;
01952 Real x3l,x3u;
01953 long p, n = 0;
01954 double *pd = send_buf;
01955
01956
01957 x3l = pG->x3_0 + (pG->ks + pG->kdisp)*pG->dx3;
01958 x3u = pG->x3_0 + (pG->ks+nlayer + pG->kdisp)*pG->dx3;
01959
01960
01961 for (p=0; p<pG->nparticle; p++) {
01962 cur = &(pG->particle[p]);
01963
01964 if (cur->x3 < x3u) {
01965 if ((cur->pos == 0) || (cur->pos == 1))
01966 {
01967 if (cur->x3 >= x3l) {
01968 packing_one_particle(cur, n, 0);
01969 n += 1;
01970 }
01971 }
01972 else if (cur->pos != 23)
01973 {
01974
01975 packing_one_particle(cur, n, 13);
01976 n += 1;
01977 }
01978 }
01979 }
01980 return n;
01981 }
01982
01983
01984
01985
01986
01987
01988
01989
01990
01991
01992
01993 static long packing_ox3_particle(Grid *pG, int nlayer)
01994 {
01995 Grain *cur;
01996 Real x3l,x3u;
01997 long p, n = 0;
01998
01999
02000 x3l = pG->x3_0 + (pG->ke-nlayer+1 + pG->kdisp)*pG->dx3;
02001 x3u = pG->x3_0 + (pG->ke+1 + pG->kdisp)*pG->dx3;
02002
02003
02004 for (p=0; p<pG->nparticle; p++) {
02005 cur = &(pG->particle[p]);
02006
02007 if (cur->x3 >= x3l) {
02008 if ((cur->pos == 0) || (cur->pos == 1))
02009 {
02010 if (cur->x3 < x3u) {
02011 packing_one_particle(cur, n, 0);
02012 n += 1;
02013 }
02014 }
02015 else if (cur->pos != 13)
02016 {
02017
02018 packing_one_particle(cur, n, 23);
02019 n += 1;
02020 }
02021 }
02022 }
02023 return n;
02024 }
02025
02026
02027
02028
02029
02030
02031
02032
02033
02034
02035
02036
02037 static void packing_one_particle(Grain *cur, long n, short pos)
02038 {
02039 double *pd;
02040 if ((n+2) > send_bufsize) {
02041 realloc_sendbuf();
02042 }
02043 pd = &(send_buf[NVAR_P*n]);
02044
02045
02046 *(pd++) = cur->x1;
02047 *(pd++) = cur->x2;
02048 *(pd++) = cur->x3;
02049 *(pd++) = cur->v1;
02050 *(pd++) = cur->v2;
02051 *(pd++) = cur->v3;
02052 *(pd++) = (double)(cur->property)+0.01;
02053 *(pd++) = (double)(pos)+0.01;
02054 *(pd++) = (double)(cur->my_id)+0.01;
02055 #ifdef MPI_PARALLEL
02056 *(pd++) = (double)(cur->init_id)+0.01;
02057 #endif
02058
02059 return;
02060 }
02061
02062
02063
02064
02065
02066
02067
02068
02069
02070
02071
02072
02073
02074
02075 static void shift_packed_particle(double *buf, long n, int index, double shift)
02076 {
02077 double *pd = buf;
02078 long i;
02079
02080 if (n>0) pd += index-1;
02081 for (i=0; i<n-1; i++) {
02082 *pd += shift;
02083 pd += NVAR_P;
02084 }
02085 *pd += shift;
02086
02087 return;
02088 }
02089
02090
02091
02092
02093
02094
02095
02096
02097
02098
02099
02100 static void unpack_particle(Grid *pG, double *buf, long n)
02101 {
02102 Grain *cur;
02103 Grain *newgr;
02104 double *pd = buf;
02105 long p, i;
02106
02107
02108 p = pG->nparticle;
02109 pG->nparticle += n;
02110 if (pG->nparticle >= pG->arrsize)
02111 particle_realloc(pG, pG->nparticle+1);
02112
02113 for (i=p; i<pG->nparticle; i++) {
02114 cur = &(pG->particle[i]);
02115 cur->x1 = *(pd++);
02116 cur->x2 = *(pd++);
02117 cur->x3 = *(pd++);
02118 cur->v1 = *(pd++);
02119 cur->v2 = *(pd++);
02120 cur->v3 = *(pd++);
02121 cur->property = (int)(*(pd++));
02122 pG->grproperty[cur->property].num += 1;
02123 cur->pos = (short)(*(pd++));
02124 cur->my_id = (long)(*(pd++));
02125 #ifdef MPI_PARALLEL
02126 cur->init_id = (int)(*(pd++));
02127 #endif
02128 }
02129
02130 return;
02131 }
02132
02133 #ifdef SHEARING_BOX
02134
02135
02136
02137
02138
02139
02140
/* Shearing-box exchange for particles crossing the inner x1 boundary.
 * Particles are shifted in x2 by the accumulated shear (vshear*t, mod Lx2)
 * and moved to the grid that owns their shifted position.
 * numpar: starting index passed to the packing routine (particles below it
 * are skipped).
 * Serial build: the shift is applied in place (reg=0).
 * MPI build: because the shifted strip generally straddles two x2-neighbor
 * grids, the exchange is done in two passes (reg=1, then reg=2), each pass
 * sending the particle count first, then the packed records.
 */
static void shearingbox_ix1_particle(Grid *pG, Domain *pD, long numpar)
{
#ifdef MPI_PARALLEL
  Real yshear, yshift;        /* total shear displacement; its value mod Lx2 */
  Real x2len;                 /* x2 extent of one grid */
  int id1s, id1r, id2s, id2r; /* send/recv processor ids for passes 1 and 2 */
  int ind1, ind2, ind3;       /* grid indices used to look up processor ids */
  long n1, n2;                /* number of particles packed in each pass */
  long cnt_recv;              /* number of particles to be received */
  int err;
  MPI_Request rq;
  MPI_Status stat;
#endif

#ifndef MPI_PARALLEL
  /* serial: apply the azimuthal shift to crossing particles in place */
  packing_ix1_particle_shear(pG, 0, numpar);
#else

  /* accumulated shear displacement at the current time */
  yshear = vshear*pG->time;
  yshift = fmod(yshear, Lx2);
  x2len = pG->dx2*pG->Nx2;

  /* pass-1 partner ids: offset by the shear plus one grid, wrapped in x2;
     receivers are offset the opposite way (ix1 senders sit at ind1 = 0) */
  ind1 = 0; ind3 = my_kproc;
  ind2 = my_jproc + (int)(yshift/x2len) + 1;
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  id1s = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc + (int)(yshift/x2len);
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  id2s = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc - (int)(yshift/x2len) - 1;
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  id1r = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc - (int)(yshift/x2len);
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  id2r = pD->GridArray[ind3][ind2][ind1].id;

  /* ---- pass 1: particles landing on the farther x2 neighbor ---- */
  n1 = packing_ix1_particle_shear(pG, 1, numpar);

  /* exchange the particle counts first so the receive size is known */
  err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id1r,
                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
  if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

  err = MPI_Send(&n1, 1, MPI_LONG, id1s, boundary_particle_tag, MPI_COMM_WORLD);
  if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);

  err = MPI_Wait(&rq, &stat);
  if(err) ath_error("[receive_ix1_particle_shear]: MPI_Wait error = %d\n",err);

  /* make sure the receive buffer can hold the incoming records */
  if ((cnt_recv+1) >= recv_bufsize)
    realloc_recvbuf();

  if (cnt_recv > 0) {
    err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id1r,
                    boundary_particle_tag, MPI_COMM_WORLD, &rq);
    if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
  }

  if (n1 > 0) {
    err = MPI_Send(send_buf, n1*NVAR_P, MPI_DOUBLE, id1s,
                   boundary_particle_tag, MPI_COMM_WORLD);
    if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
  }

  /* overlap: pack pass 2 while waiting for pass-1 data to arrive */
  n2 = packing_ix1_particle_shear(pG, 2, numpar);
  if (cnt_recv > 0) {
    err = MPI_Wait(&rq, &stat);
    if(err) ath_error("[recv_ix1_particle_shear]: MPI_Wait error = %d\n",err);

    unpack_particle(pG, recv_buf, cnt_recv);
  }

  /* ---- pass 2: particles landing on the nearer x2 neighbor ---- */
  err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id2r,
                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
  if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

  err = MPI_Send(&n2, 1, MPI_LONG, id2s, boundary_particle_tag, MPI_COMM_WORLD);
  if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);

  err = MPI_Wait(&rq, &stat);
  if(err) ath_error("[receive_ix1_particle_shear]: MPI_Wait error = %d\n",err);

  if ((cnt_recv+1) >= recv_bufsize)
    realloc_recvbuf();

  if (cnt_recv > 0) {
    err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id2r,
                    boundary_particle_tag, MPI_COMM_WORLD, &rq);
    if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
  }

  if (n2 > 0) {
    err = MPI_Send(send_buf, n2*NVAR_P, MPI_DOUBLE, id2s,
                   boundary_particle_tag, MPI_COMM_WORLD);
    if(err) ath_error("[send_ix1_particle_shear]: MPI_Send error = %d\n",err);
  }

  if (cnt_recv > 0) {
    err = MPI_Wait(&rq, &stat);
    if(err) ath_error("[recv_ix1_particle_shear]: MPI_Wait error = %d\n",err);

    unpack_particle(pG, recv_buf, cnt_recv);
  }

#endif

  return;
}
02285
02286
02287
02288
02289
02290
02291
02292
/* Shearing-box exchange for particles crossing the outer x1 boundary.
 * Mirror image of shearingbox_ix1_particle: the x2 shift is applied in the
 * opposite sense, and partner grids sit at ind1 = NGrid_x1-1.
 * numpar: starting index passed to the packing routine (particles below it
 * are skipped).
 * Serial build: the shift is applied in place (reg=0).
 * MPI build: two-pass exchange (reg=1, then reg=2) since the shifted strip
 * generally straddles two x2-neighbor grids; counts are sent before data.
 */
static void shearingbox_ox1_particle(Grid *pG, Domain *pD, long numpar)
{
#ifdef MPI_PARALLEL
  Real yshear, yshift;        /* total shear displacement; its value mod Lx2 */
  Real x2len;                 /* x2 extent of one grid */
  int id1s, id1r, id2s, id2r; /* send/recv processor ids for passes 1 and 2 */
  int ind1, ind2, ind3;       /* grid indices used to look up processor ids */
  long n1, n2;                /* number of particles packed in each pass */
  long cnt_recv;              /* number of particles to be received */
  int err;
  MPI_Request rq;
  MPI_Status stat;
#endif

#ifndef MPI_PARALLEL
  /* serial: apply the azimuthal shift to crossing particles in place */
  packing_ox1_particle_shear(pG, 0, numpar);
#else

  /* accumulated shear displacement at the current time */
  yshear = vshear*pG->time;
  yshift = fmod(yshear, Lx2);
  x2len = pG->dx2*pG->Nx2;

  /* partner ids: same construction as the ix1 case but with the x2 offset
     signs reversed, and ox1 senders sit at ind1 = NGrid_x1-1 */
  ind1 = pD->NGrid_x1-1; ind3 = my_kproc;
  ind2 = my_jproc - (int)(yshift/x2len) - 1;
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  id1s = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc - (int)(yshift/x2len);
  if (ind2 < 0) ind2 += pD->NGrid_x2;
  id2s = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc + (int)(yshift/x2len) + 1;
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  id1r = pD->GridArray[ind3][ind2][ind1].id;

  ind2 = my_jproc + (int)(yshift/x2len);
  if (ind2 > (pD->NGrid_x2-1)) ind2 -= pD->NGrid_x2;
  id2r = pD->GridArray[ind3][ind2][ind1].id;

  /* ---- pass 1: particles landing on the farther x2 neighbor ---- */
  n1 = packing_ox1_particle_shear(pG, 1, numpar);

  /* exchange the particle counts first so the receive size is known */
  err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id1r,
                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
  if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

  err = MPI_Send(&n1, 1, MPI_LONG, id1s, boundary_particle_tag, MPI_COMM_WORLD);
  if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);

  err = MPI_Wait(&rq, &stat);
  if(err) ath_error("[recc_ox1_particle_shear]: MPI_Wait error = %d\n",err);

  /* make sure the receive buffer can hold the incoming records */
  if ((cnt_recv+1) >= recv_bufsize)
    realloc_recvbuf();

  if (cnt_recv > 0) {
    err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id1r,
                    boundary_particle_tag, MPI_COMM_WORLD, &rq);
    if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
  }

  if (n1 > 0) {
    err = MPI_Send(send_buf, n1*NVAR_P, MPI_DOUBLE, id1s,
                   boundary_particle_tag, MPI_COMM_WORLD);
    if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
  }

  /* overlap: pack pass 2 while waiting for pass-1 data to arrive */
  n2 = packing_ox1_particle_shear(pG, 2, numpar);
  if (cnt_recv > 0) {
    err = MPI_Wait(&rq, &stat);
    if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);

    unpack_particle(pG, recv_buf, cnt_recv);
  }

  /* ---- pass 2: particles landing on the nearer x2 neighbor ---- */
  err = MPI_Irecv(&cnt_recv, 1, MPI_LONG, id2r,
                  boundary_particle_tag, MPI_COMM_WORLD, &rq);
  if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);

  err = MPI_Send(&n2, 1, MPI_LONG, id2s, boundary_particle_tag, MPI_COMM_WORLD);
  if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);

  err = MPI_Wait(&rq, &stat);
  if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);

  if ((cnt_recv+1) >= recv_bufsize)
    realloc_recvbuf();

  if (cnt_recv > 0) {
    err = MPI_Irecv(recv_buf, cnt_recv*NVAR_P, MPI_DOUBLE, id2r,
                    boundary_particle_tag, MPI_COMM_WORLD, &rq);
    if(err) ath_error("[set_bvals_particle]: MPI_Irecv error = %d\n",err);
  }

  if (n2 > 0) {
    err = MPI_Send(send_buf, n2*NVAR_P, MPI_DOUBLE, id2s,
                   boundary_particle_tag, MPI_COMM_WORLD);
    if(err) ath_error("[send_ox1_particle_shear]: MPI_Send error = %d\n",err);
  }

  if (cnt_recv > 0) {
    err = MPI_Wait(&rq, &stat);
    if(err) ath_error("[recv_ox1_particle_shear]: MPI_Wait error = %d\n",err);

    unpack_particle(pG, recv_buf, cnt_recv);
  }

#endif

  return;
}
02436
02437
02438
02439
02440
02441
02442
02443
02444
02445
02446 static long packing_ix1_particle_shear(Grid *pG, int reg, long numpar)
02447 {
02448 Grain *cur;
02449 long n, p;
02450 Real ix1b;
02451
02452 Real yshear, yshift;
02453
02454 Real x20, x2c;
02455 double *pd;
02456
02457
02458
02459 yshear = vshear*pG->time;
02460 yshift = fmod(yshear, Lx2);
02461 x20 = pG->x2_0+(pG->je+pG->jdisp+1)*pG->dx2;
02462 x2c = x20 - fmod(yshear, pG->dx2*pG->Nx2);
02463
02464
02465 ix1b = pG->x1_0 + (pG->is + pG->idisp)*pG->dx1;
02466
02467
02468 pd = send_buf;
02469 n = 0;
02470
02471
02472
02473 p = numpar;
02474 while (p<pG->nparticle) {
02475 cur = &(pG->particle[p]);
02476 p += 1;
02477 if ((cur->pos == 21) || ( (cur->pos == 0) && (cur->x1 < ix1b)))
02478 {
02479
02480 if (((reg == 1) && (cur->x2 >= x2c)) || ((reg == 2) && (cur->x2 < x2c)))
02481 {
02482
02483 cur->x2 = x2min + fmod(cur->x2 - x2min + yshift, Lx2);
02484
02485
02486 packing_one_particle(cur, n, cur->pos);
02487 n += 1;
02488
02489
02490 pG->nparticle -= 1;
02491 pG->grproperty[cur->property].num -= 1;
02492 p -= 1;
02493 pG->particle[p] = pG->particle[pG->nparticle];
02494 }
02495
02496 if (reg == 0)
02497 cur->x2 = x2min + fmod(cur->x2 - x2min + yshift, Lx2);
02498 }
02499 }
02500
02501 return n;
02502 }
02503
02504
02505
02506
02507
02508
02509
02510
02511
02512
02513 static long packing_ox1_particle_shear(Grid *pG, int reg, long numpar)
02514 {
02515 Grain *cur;
02516 long n, p;
02517 Real ox1b;
02518
02519 Real yshear, yshift;
02520
02521 Real x20, x2c;
02522 double *pd;
02523
02524
02525
02526 yshear = vshear*pG->time;
02527 yshift = fmod(yshear, Lx2);
02528 x20 = pG->x2_0+(pG->js+pG->jdisp)*pG->dx2;
02529 x2c = x20 + fmod(yshear, pG->dx2*pG->Nx2);
02530
02531
02532 ox1b = pG->x1_0 + (pG->ie + 1 + pG->idisp)*pG->dx1;
02533
02534
02535 pd = send_buf;
02536 n = 0;
02537
02538
02539
02540 p = numpar;
02541 while (p<pG->nparticle) {
02542 cur = &(pG->particle[p]);
02543 p += 1;
02544 if ((cur->pos == 11) || ( (cur->pos == 0) && (cur->x1 >= ox1b)))
02545 {
02546
02547 if (((reg == 1) && (cur->x2 < x2c)) || ((reg == 2) && (cur->x2 >= x2c)))
02548 {
02549
02550
02551 cur->x2 = x2min + fmod(cur->x2 - x2min + Lx2 - yshift, Lx2);
02552
02553
02554 packing_one_particle(cur, n, cur->pos);
02555 n += 1;
02556
02557
02558 pG->nparticle -= 1;
02559 pG->grproperty[cur->property].num -= 1;
02560 p -= 1;
02561 pG->particle[p] = pG->particle[pG->nparticle];
02562 }
02563 if (reg == 0)
02564 cur->x2 = x2min + fmod(cur->x2 - x2min + Lx2 - yshift, Lx2);
02565 }
02566 }
02567
02568 return n;
02569 }
02570
02571 #ifdef FARGO
02572
02573
02574
02575
02576
02577
02578
02579
02580
02581 static long packing_particle_fargo(Grid *pG, Real yl, Real yu)
02582 {
02583 Grain *cur;
02584 long p, n;
02585 double *pd;
02586
02587 p = 0;
02588 n = 0;
02589 pd = send_buf;
02590 while (p<pG->nparticle) {
02591 cur = &(pG->particle[p]);
02592 p += 1;
02593 if ((cur->x2 >= yl) && (cur->x2 < yu))
02594 {
02595 packing_one_particle(cur, n, cur->pos);
02596 n += 1;
02597
02598
02599 pG->nparticle -= 1;
02600 pG->grproperty[cur->property].num -= 1;
02601 p -= 1;
02602 pG->particle[p] = pG->particle[pG->nparticle];
02603 }
02604 }
02605
02606 return n;
02607 }
02608
02609
02610
02611
02612 static int gridshift(Real shift)
02613 {
02614 if (shift>0)
02615 return (int)(shift)+1;
02616 else if (shift<0)
02617 return (int)(shift)-1;
02618 else return 0;
02619 }
02620
02621 #endif
02622
02623 #endif
02624
02625 #undef NVAR_P
02626
02627 #endif