00001 #include "copyright.h"
00002 #define MAIN_C
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
/* Version banner printed by usage(); update on each release. */
static char *athena_version = "version 4.0 - 01-Jul-2010";
00032
00033 #include <stdio.h>
00034 #include <stdlib.h>
00035 #include <string.h>
00036 #include <math.h>
00037 #include <time.h>
00038 #include <unistd.h>
00039 #include <sys/stat.h>
00040 #include <sys/time.h>
00041 #include <sys/times.h>
00042 #include <sys/types.h>
00043 #include "defs.h"
00044 #include "athena.h"
00045 #include "globals.h"
00046 #include "prototypes.h"
00047
00048
00049
00050
00051
00052
00053
/* Internal helpers, defined at the bottom of this file. */
static void change_rundir(const char *name);  /* cd into run dir, creating it if needed */
static void usage(const char *prog);          /* print CLI help and exit */

/* Width of the "baton" passed to baton_start()/baton_stop() in
 * change_rundir(): limits how many MPI ranks perform file-system
 * operations (mkdir/chdir) simultaneously. */
#define MAX_FILE_OP 256
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
/* Athena driver: parses the command line, reads the input (or restart)
 * file, builds the Mesh hierarchy, then runs the main integration loop
 * until tlim/nlim/a signal is reached, and finally reports performance
 * and tears everything down.
 *
 * Recognized options: -i <file>, -r <restart>, -d <dir>, -n, -h, -c,
 * and (MPI builds only) -t hh:mm:ss.  Returns EXIT_SUCCESS. */
int main(int argc, char *argv[])
{
  MeshS Mesh;             /* the whole mesh hierarchy for this run */
  VDFun_t Integrate;      /* integrator entry point, chosen at runtime */
#ifdef SELF_GRAVITY
  VDFun_t SelfGrav;       /* self-gravity solver entry point */
#endif
  int nl,nd;              /* level / domain loop indices */
  char *definput = "athinput";   /* default input file name */
  char *athinput = definput;
  int ires=0;             /* 1 <=> this is a restart (-r given) */
  char *res_file = NULL;  /* restart file name from -r */
  char *rundir = NULL;    /* run directory from -d */
  int nstep_start=0;      /* cycles already completed when restarting */
  char *name = NULL;
  char level_dir[80];     /* per-level subdirectory names ("lev%d") */
  FILE *fp;
  int nflag=0;            /* 1 <=> parse input then exit (-n given) */
  int i,nlim;             /* nlim = cycle limit from input file */
  Real tlim;              /* time limit from input file */

  int out_level, err_level, lazy;  /* logging verbosity / lazy-open flag */
  int iflush, nflush;              /* flush logs every iflush cycles */

  int iquit=0;            /* quit code set by ath_sig_act()/wall-time check */

  /* Wall-clock and CPU timing state for the performance report. */
  time_t start, stop;
  int have_time = time(&start);  /* >0 iff calendar time is available */
  int zones;
  double cpu_time, zcs;          /* zcs = zone-cycles per second */
  long clk_tck = sysconf(_SC_CLK_TCK);
  struct tms tbuf;
  clock_t time0,time1, have_times;
  struct timeval tvs, tve;
  Real dt_done;                  /* dt of the step just completed */

#ifdef MPI_PARALLEL
  char *pc, *suffix, new_name[MAXLEN];
  int len, h, m, s, err, use_wtlim=0;
  double wtend;                  /* MPI_Wtime() value at which to quit */
  if(MPI_SUCCESS != MPI_Init(&argc, &argv))
    ath_error("[main]: Error on calling MPI_Init\n");
#endif

  /* Parse command-line options.  Only 2-character "-?" tokens are
   * treated as options; everything else (block/par=value overrides)
   * is handled later by par_cmdline().
   * NOTE(review): options taking an argument use argv[++i] without
   * checking i+1 < argc; a trailing "-i"/"-r"/"-d"/"-t" picks up the
   * NULL at argv[argc] -- confirm callers never do this. */
  for (i=1; i<argc; i++) {
    if(*argv[i] == '-' && *(argv[i]+1) != '\0' && *(argv[i]+2) == '\0'){
      switch(*(argv[i]+1)) {
      case 'i':                          /* -i <file>: alternate input */
        athinput = argv[++i];
        break;
      case 'r':                          /* -r <file>: restart run */
        ires = 1;
        res_file = argv[++i];
        /* If no -i was given, parameters are read from the restart file. */
        if(athinput == definput) athinput = res_file;
        break;
      case 'd':                          /* -d <dir>: alternate run dir */
        rundir = argv[++i];
        break;
      case 'n':                          /* -n: parse input, don't run */
        nflag = 1;
        break;
      case 'h':                          /* -h: help (usage() exits) */
        usage(argv[0]);
        break;
      case 'c':                          /* -c: show config and quit */
        show_config();
        exit(0);
        break;
#ifdef MPI_PARALLEL
      case 't':                          /* -t hh:mm:ss: wall-time limit */
        use_wtlim = 1;
        sscanf(argv[++i],"%d:%d:%d",&h,&m,&s);
        wtend = MPI_Wtime() + s + 60*(m + 60*h);
        printf("Wall time limit: %d hrs, %d min, %d sec\n",h,m,s);
        break;
#else
      /* NOTE(review): in MPI builds this default case is compiled out,
       * so unrecognized single-character options are silently ignored
       * there -- confirm this asymmetry is intentional. */
      default:
        usage(argv[0]);
        break;
#endif
      }
    }
  }

#ifdef MPI_PARALLEL
  /* --- MPI start-up: rank 0 reads the input file, then the parameter
   * database is broadcast to all other ranks. --- */
  if(MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD, &myID_Comm_world))
    ath_error("[main]: Error on calling MPI_Comm_rank\n");

  if(myID_Comm_world == 0){
    par_open(athinput);
    par_cmdline(argc,argv);
  }
  par_dist_mpi(myID_Comm_world,MPI_COMM_WORLD);

  /* Non-root ranks tag their problem_id with "-id<rank>" so each rank
   * writes distinct output files. */
  if(myID_Comm_world != 0){
    name = par_gets("job","problem_id");
    sprintf(new_name,"%s-id%d",name,myID_Comm_world);
    free(name);
    par_sets("job","problem_id",new_name,NULL);
  }

  show_config_par();

  /* Everyone needs to agree on whether this is a restart. */
  if(MPI_SUCCESS != MPI_Bcast(&ires, 1, MPI_INT, 0, MPI_COMM_WORLD))
    ath_error("[main]: Error on calling MPI_Bcast\n");

  /* On restart, broadcast the restart file name and rewrite it on
   * non-root ranks as <base>-id<rank><suffix>, mirroring how per-rank
   * restart dumps are named. */
  if(ires){
    if(myID_Comm_world == 0)
      len = 1 + (int)strlen(res_file);

    if(MPI_SUCCESS != MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD))
      ath_error("[main]: Error on calling MPI_Bcast\n");

    if(len + 10 > MAXLEN)
      ath_error("[main]: Restart filename length = %d is too large\n",len);

    if(myID_Comm_world == 0) strcpy(new_name, res_file);
    if(MPI_SUCCESS != MPI_Bcast(new_name, len, MPI_CHAR, 0, MPI_COMM_WORLD))
      ath_error("[main]: Error on calling MPI_Bcast\n");

    /* Expect a 4-character extension: new_name[len-5] must be '.'
     * (len counts the trailing NUL). */
    pc = &(new_name[len - 5]);
    if(*pc != '.'){
      ath_error("[main]: Bad Restart filename: %s\n",new_name);
    }

    /* Walk back to the previous '.' (start of the numeric part). */
    do{
      pc--;
      if(pc == new_name)
        ath_error("[main]: Bad Restart filename: %s\n",new_name);
    }while(*pc != '.');

    if(myID_Comm_world == 0) {
      strcpy(new_name, res_file);
    } else {
      suffix = ath_strdup(pc);
      sprintf(pc,"-id%d%s",myID_Comm_world,suffix);
      free(suffix);
      res_file = new_name;
    }
  }

  /* -n: dump the parsed parameters and stop. */
  if(nflag){
    par_dump(0,stdout);
    par_close();
    MPI_Finalize();
    return 0;
  }

#else
  /* --- Serial start-up: read input file directly. --- */
  myID_Comm_world = 0;
  par_open(athinput);
  par_cmdline(argc,argv);
  show_config_par();

  /* -n: dump the parsed parameters and stop. */
  if(nflag){
    par_dump(0,stdout);
    par_close();
    return 0;
  }
#endif

  /* Set up logging: optionally redirect stdout/stderr to files
   * (<log/file_open>), appending when restarting. */
  if(par_geti_def("log","file_open",0)){
    iflush = par_geti_def("log","iflush",0);
    name = par_gets("job","problem_id");
    lazy = par_geti_def("log","lazy",1);
    ath_log_open(name, lazy, (ires ? "a" : "w"));
    free(name); name = NULL;
  }
  else{
    iflush = par_geti_def("log","iflush",1);
  }
  iflush = iflush > 0 ? iflush : 0;  /* 0 disables periodic flushing */

  out_level = par_geti_def("log","out_level",0);
  err_level = par_geti_def("log","err_level",0);
#ifdef MPI_PARALLEL
  /* Child ranks default to silent logs. */
  if(myID_Comm_world > 0){
    out_level = par_geti_def("log","child_out_level",-1);
    err_level = par_geti_def("log","child_err_level",-1);
  }
#endif
  ath_log_set_level(out_level, err_level);

  if(have_time > 0)
    ath_pout(0,"Simulation started on %s\n",ctime(&start));

  /* Build the Mesh hierarchy and allocate the Grids. */
  init_mesh(&Mesh);
  init_grid(&Mesh);
#ifdef PARTICLES
  init_particle(&Mesh);
#endif

  /* Read global control and physics parameters. */
  CourNo = par_getd("time","cour_no");
  nlim = par_geti_def("time","nlim",-1);   /* nlim < 0 => no cycle limit */
  tlim = par_getd("time","tlim");

#ifdef ISOTHERMAL
  Iso_csound = par_getd("problem","iso_csound");
  Iso_csound2 = Iso_csound*Iso_csound;
#else
  Gamma = par_getd("problem","gamma");
  Gamma_1 = Gamma - 1.0;
  Gamma_2 = Gamma - 2.0;
#endif

  /* Sentinel values: the problem generator must set these. */
#ifdef SELF_GRAVITY
  grav_mean_rho = -1.0;
  four_pi_G = -1.0;
#endif

  /* Initialize the data: either reread it from the restart file, or
   * call the problem generator on every Grid this rank owns. */
  if(ires) {
    restart_grids(res_file, &Mesh);
    nstep_start = Mesh.nstep;
  } else {
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL) problem(&(Mesh.Domain[nl][nd]));
      }
    }
  }

#ifdef STATIC_MESH_REFINEMENT
  SMR_init(&Mesh);
  RestrictCorrect(&Mesh);
#endif

  /* First cycle at which the logs will be flushed. */
  nflush = nstep_start + iflush;

  /* Set up boundary conditions, then apply them once so ghost zones
   * are valid before the first step. */
  bvals_mhd_init(&Mesh);

#ifdef SELF_GRAVITY
  bvals_grav_init(&Mesh);
#endif
#if defined(SHEARING_BOX) || (defined(FARGO) && defined(CYLINDRICAL))
  bvals_shear_init(&Mesh);
#endif
#ifdef PARTICLES
  bvals_particle_init(&Mesh);
#endif

  for (nl=0; nl<(Mesh.NLevels); nl++){
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
      if (Mesh.Domain[nl][nd].Grid != NULL){
        bvals_mhd(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
        bvals_particle(&(Mesh.Domain[nl][nd]));
#ifdef FEEDBACK
        exchange_feedback_init(&(Mesh.Domain[nl][nd]));
#endif
#endif
      }
    }
  }

#ifdef STATIC_MESH_REFINEMENT
  Prolongate(&Mesh);
#endif

  /* On a fresh start compute the first timestep; a restart already
   * carries its dt. */
  if(ires == 0) new_dt(&Mesh);

  /* Set up output, reconstruction, the integrator, and (optionally)
   * self-gravity and diffusion. */
  init_output(&Mesh);
  lr_states_init(&Mesh);
  Integrate = integrate_init(&Mesh);
#ifdef SELF_GRAVITY
  SelfGrav = selfg_init(&Mesh);
  for (nl=0; nl<(Mesh.NLevels); nl++){
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
      if (Mesh.Domain[nl][nd].Grid != NULL){
        (*SelfGrav)(&(Mesh.Domain[nl][nd]));
        bvals_grav(&(Mesh.Domain[nl][nd]));
      }
    }
  }
#endif
#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
  integrate_diff_init(&Mesh);
#endif

  /* Echo the full parameter set into the log, move into the run
   * directory, install signal handlers, and create per-level dirs. */
  if(out_level >= 0){
    fp = athout_fp();
    par_dump(0,fp);
  }
  change_rundir(rundir);
  ath_sig_init();
  for (nl=1; nl<(Mesh.NLevels); nl++){
    sprintf(level_dir,"lev%d",nl);
    mkdir(level_dir, 0775);
  }

  /* Start the clocks: prefer times() (CPU time), fall back to clock(). */
  gettimeofday(&tvs,NULL);
  if((have_times = times(&tbuf)) > 0)
    time0 = tbuf.tms_utime + tbuf.tms_stime;
  else
    time0 = clock();

  /* Force-write all outputs at t=0 on a fresh start. */
  if (ires==0) data_output(&Mesh, 1);

  ath_pout(0,"\nSetup complete, entering main loop...\n\n");
  ath_pout(0,"cycle=%i time=%e next dt=%e\n",Mesh.nstep, Mesh.time, Mesh.dt);

  /* =================== MAIN INTEGRATION LOOP ===================
   * Each iteration: write due outputs, advance one step, apply BCs,
   * update time/dt, and check for quit conditions. */
  while (Mesh.time < tlim && (nlim < 0 || Mesh.nstep < nlim)) {

    /* Outputs whose time has come (not forced). */
    data_output(&Mesh, 0);

    /* Explicit diffusion operators, then refresh ghost zones. */
#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
    integrate_diff(&Mesh);
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          bvals_mhd(&(Mesh.Domain[nl][nd]));
        }
      }
    }
#endif

    /* Advance every Grid this rank owns by one timestep. */
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          (*Integrate)(&(Mesh.Domain[nl][nd]));

#ifdef FARGO
          Fargo(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
          /* NOTE(review): level0_Grid/level0_Domain do not exist in this
           * function (old single-level API); this line cannot compile
           * with FARGO+PARTICLES enabled -- needs updating. */
          advect_particles(&level0_Grid, &level0_Domain);
#endif
#endif
        }
      }
    }

    /* With SMR: correct coarse levels from fine-level solutions. */
#ifdef STATIC_MESH_REFINEMENT
    RestrictCorrect(&Mesh);
#endif

    /* User-defined per-cycle work (problem-specific). */
    Userwork_in_loop(&Mesh);

    /* Solve for the gravitational potential and apply source terms. */
#ifdef SELF_GRAVITY
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          (*SelfGrav)(&(Mesh.Domain[nl][nd]));
          bvals_grav(&(Mesh.Domain[nl][nd]));
          selfg_fc(&(Mesh.Domain[nl][nd]));
        }
      }
    }
#endif

    /* Advance the clock and propagate the new time to every Grid. */
    Mesh.nstep++;
    Mesh.time += Mesh.dt;
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          Mesh.Domain[nl][nd].Grid->time = Mesh.time;
        }
      }
    }

    dt_done = Mesh.dt;
    new_dt(&Mesh);   /* compute dt for the NEXT step */

    /* Refresh boundary values for the next step. */
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          bvals_mhd(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
          /* NOTE(review): same stale level0_Grid API as above -- would
           * not compile with PARTICLES enabled; confirm and update. */
          bvals_particle(&level0_Grid, &level0_Domain);
#endif
        }
      }
    }

#ifdef STATIC_MESH_REFINEMENT
    Prolongate(&Mesh);
#endif

    /* Quit checks: wall-time limit (MPI) and OS signals. */
#ifdef MPI_PARALLEL
    if(use_wtlim && (MPI_Wtime() > wtend))
      iquit = 103;   /* sentinel meaning "wall-time limit reached" */
#endif
    if(ath_sig_act(&iquit) != 0) break;

    ath_pout(0,"cycle=%i time=%e next dt=%e last dt=%e\n",
             Mesh.nstep,Mesh.time,Mesh.dt,dt_done);

    /* Periodic log flush every iflush cycles. */
    if(nflush == Mesh.nstep){
      ath_flush_out();
      ath_flush_err();
      nflush += iflush;
    }
  }
  /* =================== END MAIN LOOP =================== */

  /* Report why we stopped. */
  if (Mesh.nstep == nlim)
    ath_pout(0,"\nterminating on cycle limit\n");
#ifdef MPI_PARALLEL
  else if(use_wtlim && iquit == 103)
    ath_pout(0,"\nterminating on wall-time limit\n");
#endif
  else
    ath_pout(0,"\nterminating on time limit\n");

  /* CPU time used (floor of 1 tick avoids divide-by-zero below). */
  gettimeofday(&tve,NULL);
  if(have_times > 0) {
    times(&tbuf);
    time1 = tbuf.tms_utime + tbuf.tms_stime;
    cpu_time = (time1 > time0 ? (double)(time1 - time0) : 1.0)/
      (double)clk_tck;
  } else {
    time1 = clock();
    cpu_time = (time1 > time0 ? (double)(time1 - time0) : 1.0)/
      (double)CLOCKS_PER_SEC;
  }

  /* Zone-cycles per CPU second over the Grids this rank updated. */
  zones = 0;
  for (nl=0; nl<(Mesh.NLevels); nl++){
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
      if (Mesh.Domain[nl][nd].Grid != NULL) {
        zones += (Mesh.Domain[nl][nd].Grid->Nx[0])*
          (Mesh.Domain[nl][nd].Grid->Nx[1])*
          (Mesh.Domain[nl][nd].Grid->Nx[2]);
      }
    }}
  zcs = (double)zones*(double)((Mesh.nstep) - nstep_start)/cpu_time;

  ath_pout(0,"  tlim= %e   nlim= %i\n",tlim,nlim);
  ath_pout(0,"  time= %e  cycle= %i\n",Mesh.time,Mesh.nstep);
  ath_pout(0,"\nzone-cycles/cpu-second = %e\n",zcs);

  /* Same metric against elapsed wall time. */
  cpu_time = (double)(tve.tv_sec - tvs.tv_sec) +
    1.0e-6*(double)(tve.tv_usec - tvs.tv_usec);
  zcs = (double)zones*(double)((Mesh.nstep) - nstep_start)/cpu_time;
  ath_pout(0,"\nelapsed wall time = %e sec.\n",cpu_time);
  ath_pout(0,"\nzone-cycles/wall-second = %e\n",zcs);

  /* Under MPI: also report the aggregate rate using the full Domain
   * sizes (all ranks), not just this rank's Grids. */
#ifdef MPI_PARALLEL
  zones = 0;
  for (nl=0; nl<(Mesh.NLevels); nl++){
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
      zones += (Mesh.Domain[nl][nd].Nx[0])*
        (Mesh.Domain[nl][nd].Nx[1])*
        (Mesh.Domain[nl][nd].Nx[2]);
    }}
  zcs = (double)zones*(double)(Mesh.nstep - nstep_start)/cpu_time;
  ath_pout(0,"\ntotal zone-cycles/wall-second = %e\n",zcs);
#endif

  /* User-defined end-of-run work, then force-write final outputs. */
  Userwork_after_loop(&Mesh);

  data_output(&Mesh, 1);

  /* Tear down in reverse order of initialization. */
  lr_states_destruct();
  integrate_destruct();
  data_output_destruct();
#ifdef PARTICLES
  /* NOTE(review): stale level0_Grid API again -- see loop above. */
  particle_destruct(&level0_Grid);
  bvals_particle_destruct(&level0_Grid);
#endif
#if defined(SHEARING_BOX) || (defined(FARGO) && defined(CYLINDRICAL))
  bvals_shear_destruct();
#endif
#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
  integrate_diff_destruct();
#endif
  par_close();

#ifdef MPI_PARALLEL
  MPI_Finalize();
#endif

  if(time(&stop)>0)
    ath_pout(0,"\nSimulation terminated on %s",ctime(&stop));

  ath_log_close();

  return EXIT_SUCCESS;
}
00716
00717
00718
00719
00720
00721
00722
/* Change the working directory of this process to the run directory.
 *
 * Serial build: mkdir(name) then chdir(name); fatal error on failure.
 * MPI build: rank 0 creates <name>, all ranks chdir into it, then each
 * rank creates and enters its own "id<rank>" subdirectory.  File-system
 * operations are throttled through baton_start/baton_stop so at most
 * MAX_FILE_OP ranks touch the file system at once; any failure is
 * detected collectively via MPI_Allreduce and aborts the whole job.
 *
 * name may be NULL or "" meaning "stay in the current directory"
 * (MPI ranks still descend into id<rank>).
 *
 * Fix: defined 'static' to match the forward declaration at the top of
 * this file (the definition previously omitted the keyword), and the
 * per-rank directory name is now built with a bounded snprintf. */
static void change_rundir(const char *name)
{
#ifdef MPI_PARALLEL

  int err=0;
  int rerr, gerr, my_id, status;
  char mydir[80];

  status = MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
  if(status != MPI_SUCCESS)
    ath_error("[change_rundir]: MPI_Comm_rank error = %d\n",status);

  if(name != NULL && *name != '\0'){

    /* Only rank 0 creates the shared run directory. */
    if(my_id == 0)
      mkdir(name, 0775);

    MPI_Barrier(MPI_COMM_WORLD);   /* wait until the directory exists */

    baton_start(MAX_FILE_OP, ch_rundir0_tag);

    if(chdir(name)){
      ath_perr(-1,"[change_rundir]: Cannot change directory to \"%s\"\n",name);
      err = 1;
    }

    baton_stop(MAX_FILE_OP, ch_rundir0_tag);

    /* Collective failure check: if any rank failed, all abort. */
    rerr = MPI_Allreduce(&err, &gerr, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if(rerr) ath_perr(-1,"[change_rundir]: MPI_Allreduce error = %d\n",rerr);

    if(rerr || gerr){
      MPI_Abort(MPI_COMM_WORLD, 1);
      exit(EXIT_FAILURE);
    }
  }

  /* Each rank descends into its private "id<rank>" subdirectory. */
  snprintf(mydir, sizeof(mydir), "id%d", my_id);

  baton_start(MAX_FILE_OP, ch_rundir1_tag);

  mkdir(mydir, 0775);
  if(chdir(mydir)){
    ath_perr(-1,"[change_rundir]: Cannot change directory to \"%s\"\n",mydir);
    err = 1;
  }

  baton_stop(MAX_FILE_OP, ch_rundir1_tag);

  /* Second collective failure check, for the per-rank directory. */
  rerr = MPI_Allreduce(&err, &gerr, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  if(rerr) ath_perr(-1,"[change_rundir]: MPI_Allreduce error = %d\n",rerr);

  if(rerr || gerr){
    MPI_Abort(MPI_COMM_WORLD, 1);
    exit(EXIT_FAILURE);
  }

#else

  /* Serial: nothing to do unless a directory was requested. */
  if(name == NULL || *name == '\0') return;

  mkdir(name, 0775);
  if(chdir(name))
    ath_error("[change_rundir]: Cannot change directory to \"%s\"\n",name);

#endif

  return;
}
00796
00797
00798
00799
00800
00801
00802
00803
00804
00805 static void usage(const char *prog)
00806 {
00807 ath_perr(-1,"Athena %s\n",athena_version);
00808 ath_perr(-1," Last configure: %s\n",CONFIGURE_DATE);
00809 ath_perr(-1,"\nUsage: %s [options] [block/par=value ...]\n",prog);
00810 ath_perr(-1,"\nOptions:\n");
00811 ath_perr(-1," -i <file> Alternate input file [athinput]\n");
00812 ath_perr(-1," -r <file> Restart a simulation with this file\n");
00813 ath_perr(-1," -d <directory> Alternate run dir [current dir]\n");
00814 ath_perr(-1," -h This Help, and configuration settings\n");
00815 ath_perr(-1," -n Parse input, but don't run program\n");
00816 ath_perr(-1," -c Show Configuration details and quit\n");
00817 ath_perr(-1," -t hh:mm:ss With MPI, wall time limit for final output\n");
00818 show_config();
00819 exit(0);
00820 }