• Main Page
  • Classes
  • Files
  • File List
  • File Members

main.c

Go to the documentation of this file.
00001 #include "copyright.h"
00002 #define MAIN_C
00003 /*============================================================================*/
00004 /*! \file main.c
00005  *  \brief Athena main program file.
00006  *
00007  * //////////////////////////// ATHENA Main Program \\\\\\\\\\\\\\\\\\\\\\\\\\\
00008  *
00009  *  Athena - C version developed by JM Stone, TA Gardiner, & PJ Teuben.
00010  *
00011  *  Significant additional contributions from X. Bai, K. Beckwith, N. Lemaster,
00012  *  I. Parrish, & A. Skinner.  See also the F90 version developed by JF Hawley
00013  *  & JB Simon.
00014  *
00015  *  History:
00016  * - v1.0 [Feb 2003] - 1D adiabatic and isothermal MHD
00017  * - v1.1 [Sep 2003] - bug fixes in eigensystems
00018  * - v2.0 [Dec 2004] - 2D adiabatic and isothermal MHD
00019  * - v3.0 [Feb 2007] - 3D adiabatic and isothermal MHD with MPI
00020  * - v3.1 [Jan 2008] - multiple species, self-gravity
00021  * - v3.2 [Sep 2009] - viscosity, resistivity, conduction, particles, special
00022  *                     relativity, cylindrical coordinates
00023  * - v4.0 [Jul 2010] - static mesh refinement with MPI
00024  *
00025  * See the GNU General Public License for usage restrictions. 
00026  *                                                                              
00027  * PRIVATE FUNCTION PROTOTYPES:
00028  * - change_rundir() - creates and outputs data to new directory
00029  * - usage()         - outputs help message and terminates execution          */
00030 /*============================================================================*/
/* Version string printed by usage(); update alongside each release tag. */
static char *athena_version = "version 4.0 - 01-Jul-2010";
00032 
00033 #include <stdio.h>
00034 #include <stdlib.h>
00035 #include <string.h>
00036 #include <math.h>
00037 #include <time.h>
00038 #include <unistd.h>
00039 #include <sys/stat.h>
00040 #include <sys/time.h>
00041 #include <sys/times.h>
00042 #include <sys/types.h>
00043 #include "defs.h"
00044 #include "athena.h"
00045 #include "globals.h"
00046 #include "prototypes.h"
00047 
00048 /*==============================================================================
00049  * PRIVATE FUNCTION PROTOTYPES:
00050  *   change_rundir - creates and outputs data to new directory
00051  *   usage         - outputs help message and terminates execution
00052  *============================================================================*/
00053 
00054 static void change_rundir(const char *name);
00055 static void usage(const char *prog);
00056 
/* Maximum number of mkdir() and chdir() file operations that will be executed
 * at once in the change_rundir() function when running in parallel, passed to
 * baton_start() and baton_stop().
 */
00061 #define MAX_FILE_OP 256
00062 
00063 /*----------------------------------------------------------------------------*/
00064 /*! \fn int main(int argc, char *argv[]) 
00065  *  \brief Athena main program  
00066  *
00067  * Steps in main:
00068  * - 1 - check for command line options and respond
00069  * - 2 - read input file and parse command line for changes
00070  * - 3 - set up diagnostic log files
00071  * - 4 - initialize Mesh, Domains, and Grids
00072  * - 5 - set initial conditions
00073  * - 6 - set boundary condition function pointers, and use to set BCs
00074  * - 7 - set function pointers for desired algorithms and physics
00075  * - 8 - write initial conditions to output file(s)
00076  * - 9 - main loop
00077  * - 10 - finish by writing final output(s), diagnostics, and free memory     */
00078 
int main(int argc, char *argv[])
{
  MeshS Mesh;             /* the entire mesh hierarchy, see athena.h */
  VDFun_t Integrate;      /* function pointer to integrator, set at runtime */
#ifdef SELF_GRAVITY
  VDFun_t SelfGrav;      /* function pointer to self-gravity, set at runtime */
#endif
  int nl,nd;
  char *definput = "athinput";  /* default input filename */
  char *athinput = definput;
  int ires=0;             /* restart flag, set to 1 if -r argument on cmdline */
  char *res_file = NULL;  /* restart filename */
  char *rundir = NULL;    /* directory to which output is directed */
  int nstep_start=0;      /* number of cycles already completed on restart */
  char *name = NULL;
  char level_dir[80];     /* names of directories for levels > 0 */
  FILE *fp;               /* file pointer for data outputs */
  int nflag=0;            /* set to 1 if -n argument is given on command line */
  int i,nlim;             /* cycle index and limit */
  Real tlim;              /* time limit (in code units) */

  int out_level, err_level, lazy; /* diagnostic output & error log levels */
  int iflush, nflush;             /* flush buffers every iflush cycles */

  int iquit=0;  /* quit signal sent to ath_sig_act, our system signal handler */

/* local variables used for timing and performance measures */

  time_t start, stop;
  int have_time = time(&start);  /* Is current calendar time (UTC) available? */
  int zones;
  double cpu_time, zcs;
  long clk_tck = sysconf(_SC_CLK_TCK);
  struct tms tbuf;
  clock_t time0,time1, have_times;
  struct timeval tvs, tve;
  Real dt_done;

#ifdef MPI_PARALLEL
  char *pc, *suffix, new_name[MAXLEN];
  int len, h, m, s, err, use_wtlim=0;
  double wtend;
  if(MPI_SUCCESS != MPI_Init(&argc, &argv))
    ath_error("[main]: Error on calling MPI_Init\n");
#endif /* MPI_PARALLEL */

/*----------------------------------------------------------------------------*/
/* Steps in main:
 *  1 - check for command line options and respond
 *  2 - read input file and parse command line for changes
 *  3 - set up diagnostic log files
 *  4 - initialize Mesh, Domains, and Grids
 *  5 - set initial conditions
 *  6 - set boundary condition function pointers, and use to set BCs
 *  7 - set function pointers for desired algorithms and physics
 *  8 - write initial conditions to output file(s)
 *  9 - main loop
 *  10 - finish by writing final output(s), diagnostics, and free memory
 */

/*--- Step 1. ----------------------------------------------------------------*/
/* Check for command line options and respond.  See comments in usage()
 * for description of options.  */

/* NOTE(review): options that take a value (-i, -r, -d, -t) read argv[++i]
 * without checking that a next argument exists; a trailing bare option would
 * pass NULL (or walk past argv) downstream -- confirm and consider guarding. */
  for (i=1; i<argc; i++) {
/* If argv[i] is a 2 character string of the form "-?" then: */
    if(*argv[i] == '-'  && *(argv[i]+1) != '\0' && *(argv[i]+2) == '\0'){
      switch(*(argv[i]+1)) {
      case 'i':                      /* -i <file>   */
        athinput = argv[++i];
        break;
      case 'r':                      /* -r <file>   */
        ires = 1;
        res_file = argv[++i];
/* If input file is not set on command line, use the restart file */
        if(athinput == definput) athinput = res_file;
        break;
      case 'd':                      /* -d <directory>   */
        rundir = argv[++i];
        break;
      case 'n':                      /* -n */
        nflag = 1;
        break;
      case 'h':                      /* -h */
        usage(argv[0]);
        break;
      case 'c':                      /* -c */
        show_config();
        exit(0);
        break;
#ifdef MPI_PARALLEL
      case 't':                      /* -t hh:mm:ss */
        use_wtlim = 1; /* Logical to use a wall time limit */
        /* NOTE(review): sscanf return value unchecked; a malformed -t
         * argument leaves h, m, s uninitialized (UB when used below). */
        sscanf(argv[++i],"%d:%d:%d",&h,&m,&s);
        wtend = MPI_Wtime() + s + 60*(m + 60*h);
        printf("Wall time limit: %d hrs, %d min, %d sec\n",h,m,s);
        break;
#else
      default:
        usage(argv[0]);
        break;
#endif /* MPI_PARALLEL */
      }
    }
  }

/*--- Step 2. ----------------------------------------------------------------*/
/* Read input file and parse command line.  For MPI_PARALLEL jobs, parent reads
 * input file and distributes information to children  */

#ifdef MPI_PARALLEL
/* Get proc id (rank) in MPI_COMM_WORLD, store as global variable */

  if(MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD, &myID_Comm_world))
    ath_error("[main]: Error on calling MPI_Comm_rank\n");

/* Only rank=0 processor reads input parameter file, parses command line,
 * broadcasts the contents of the (updated) parameter file to the children. */

  if(myID_Comm_world == 0){
    par_open(athinput);   /* for restarts, default is athinput=resfile */ 
    par_cmdline(argc,argv);
  }
  par_dist_mpi(myID_Comm_world,MPI_COMM_WORLD);

/* Modify the problem_id name in the <job> block to include information about
 * processor ids, so that all output filenames constructed from this name will
 * include myID_Comm_world as an identifier.  Only child processes modify
 * name, rank=0 process does not have myID_Comm_world in the filename */

  if(myID_Comm_world != 0){
    name = par_gets("job","problem_id");
    sprintf(new_name,"%s-id%d",name,myID_Comm_world);
    free(name);
    par_sets("job","problem_id",new_name,NULL);
  }

  show_config_par(); /* Add the configure block to the parameter database */

/* Share the restart flag with the children */

  if(MPI_SUCCESS != MPI_Bcast(&ires, 1, MPI_INT, 0, MPI_COMM_WORLD))
    ath_error("[main]: Error on calling MPI_Bcast\n");

/* rank=0 needs to send the restart file name to the children.  This requires 
 * sending the length of the restart filename string, the string, and then
 * having each child add my_id to the name so it opens the appropriate file */

/* Parent finds length of restart filename */

  if(ires){ 
    if(myID_Comm_world == 0)
      len = 1 + (int)strlen(res_file);  /* +1 for the NUL terminator */

/* Share this length with the children */

    if(MPI_SUCCESS != MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD))
      ath_error("[main]: Error on calling MPI_Bcast\n");

    /* +10 leaves room for the "-id%d" insertion performed below */
    if(len + 10 > MAXLEN)
      ath_error("[main]: Restart filename length = %d is too large\n",len);

/* Share the restart filename with the children */

    if(myID_Comm_world == 0) strcpy(new_name, res_file);
    if(MPI_SUCCESS != MPI_Bcast(new_name, len, MPI_CHAR, 0, MPI_COMM_WORLD))
      ath_error("[main]: Error on calling MPI_Bcast\n");

/* Assume the restart file name is of the form
 *  [/some/dir/]basename.0000.rst and search for the periods in the name. */

    /* len-5 points at the '.' preceding the "rst" extension */
    pc = &(new_name[len - 5]);
    if(*pc != '.'){
      ath_error("[main]: Bad Restart filename: %s\n",new_name);
    }

    do{ /* Position the char pointer at the first period */
      pc--;
      if(pc == new_name)
        ath_error("[main]: Bad Restart filename: %s\n",new_name);
    }while(*pc != '.');

/* Only children add myID_Comm_world to the filename */

    if(myID_Comm_world == 0) {
      strcpy(new_name, res_file);
    } else {       
      /* Splice "-id<rank>" in front of the dump-number suffix */
      suffix = ath_strdup(pc);
      sprintf(pc,"-id%d%s",myID_Comm_world,suffix);
      free(suffix);
      res_file = new_name;
    }
  }

/* Quit MPI_PARALLEL job if code was run with -n option. */

  if(nflag){          
    par_dump(0,stdout);   
    par_close();
    MPI_Finalize();
    return 0;
  }

#else
/* For serial (single processor) job, there is only one process to open and
 * read input file  */

  myID_Comm_world = 0;
  par_open(athinput);   /* opens AND reads */
  par_cmdline(argc,argv);
  show_config_par();   /* Add the configure block to the parameter database */

/* Quit non-MPI_PARALLEL job if code was run with -n option. */

  if(nflag){
    par_dump(0,stdout);
    par_close();
    return 0;
  }
#endif /* MPI_PARALLEL */

/*--- Step 3. ----------------------------------------------------------------*/
/* set up the simulation log files */

/* Open <problem_id>.out and <problem_id>.err files if file_open=1 in the 
 * <log> block of the input file.  Otherwise, diagnostic output will go to
 * stdout and stderr streams. */

  if(par_geti_def("log","file_open",0)){
    iflush = par_geti_def("log","iflush",0);
    name = par_gets("job","problem_id");
    lazy = par_geti_def("log","lazy",1);
    /* On restart we use mode "a", otherwise we use mode "w". */
    ath_log_open(name, lazy, (ires ? "a" : "w"));
    free(name);  name = NULL;
  }
  else{
    iflush = par_geti_def("log","iflush",1);
  }
  iflush = iflush > 0 ? iflush : 0; /* make iflush non-negative */

/* Set the ath_log output and error logging levels */
  out_level = par_geti_def("log","out_level",0);
  err_level = par_geti_def("log","err_level",0);
#ifdef MPI_PARALLEL
    if(myID_Comm_world > 0){   /* Children may use different log levels */
    out_level = par_geti_def("log","child_out_level",-1);
    err_level = par_geti_def("log","child_err_level",-1);
  }
#endif /* MPI_PARALLEL */
  ath_log_set_level(out_level, err_level);

  if(have_time > 0) /* current calendar time (UTC) is available */
    ath_pout(0,"Simulation started on %s\n",ctime(&start));

/*--- Step 4. ----------------------------------------------------------------*/
/* Initialize nested mesh hierarchy. */

  init_mesh(&Mesh);
  init_grid(&Mesh);
#ifdef PARTICLES
  init_particle(&Mesh);
#endif

/*--- Step 5. ----------------------------------------------------------------*/
/* Set initial conditions, either by reading from restart or calling problem
 * generator.  But first start by setting variables in <time> block (these
 * control execution time), and reading EOS parameters from <problem> block.  */

  CourNo = par_getd("time","cour_no");
  nlim = par_geti_def("time","nlim",-1);  /* nlim < 0 means no cycle limit */
  tlim = par_getd("time","tlim");

#ifdef ISOTHERMAL
  Iso_csound = par_getd("problem","iso_csound");
  Iso_csound2 = Iso_csound*Iso_csound;
#else
  Gamma = par_getd("problem","gamma");
  Gamma_1 = Gamma - 1.0;
  Gamma_2 = Gamma - 2.0;
#endif
/* initialize gravity constants <0, selfg_init will test these values below to
 * ensure user has set values in problem generator */
#ifdef SELF_GRAVITY
  grav_mean_rho = -1.0;
  four_pi_G = -1.0;
#endif

  if(ires) {
    restart_grids(res_file, &Mesh);  /*  Restart */
    nstep_start = Mesh.nstep;
  } else {                           /* New problem */
    for (nl=0; nl<(Mesh.NLevels); nl++){ 
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
        if (Mesh.Domain[nl][nd].Grid != NULL) problem(&(Mesh.Domain[nl][nd]));
      }
    }
  }

/* restrict initial solution so grid hierarchy is consistent */
#ifdef STATIC_MESH_REFINEMENT
  SMR_init(&Mesh);
  RestrictCorrect(&Mesh);
#endif

/* Initialize the first nstep value to flush the output and error logs. */
  nflush = nstep_start + iflush;

/*--- Step 6. ----------------------------------------------------------------*/
/* set boundary value function pointers using BC flags in <grid> blocks, then
 * set boundary conditions for initial conditions.  With SMR, this includes
 * a prolongation step to set ghost zones at internal fine/coarse boundaries  */

  bvals_mhd_init(&Mesh);

#ifdef SELF_GRAVITY
  bvals_grav_init(&Mesh);
#endif
#if defined(SHEARING_BOX) || (defined(FARGO) && defined(CYLINDRICAL))
  bvals_shear_init(&Mesh);
#endif
#ifdef PARTICLES
  bvals_particle_init(&Mesh);
#endif

  for (nl=0; nl<(Mesh.NLevels); nl++){ 
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
      if (Mesh.Domain[nl][nd].Grid != NULL){
        bvals_mhd(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
        bvals_particle(&(Mesh.Domain[nl][nd]));
#ifdef FEEDBACK
        exchange_feedback_init(&(Mesh.Domain[nl][nd]));
#endif
#endif
      }
    }
  }

/* Now that BC set, prolongate solution into child Grid GZ with SMR */
#ifdef STATIC_MESH_REFINEMENT
  Prolongate(&Mesh);
#endif

/* For new runs, set initial timestep */

  if(ires == 0) new_dt(&Mesh);

/*--- Step 7. ----------------------------------------------------------------*/
/* Set function pointers for integrator; self-gravity (based on dimensions)
 * Initialize gravitational potential for new runs
 * Allocate temporary arrays */

  init_output(&Mesh); 
  lr_states_init(&Mesh);
  Integrate = integrate_init(&Mesh);
#ifdef SELF_GRAVITY
  SelfGrav = selfg_init(&Mesh);
  for (nl=0; nl<(Mesh.NLevels); nl++){ 
    for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
      if (Mesh.Domain[nl][nd].Grid != NULL){
        (*SelfGrav)(&(Mesh.Domain[nl][nd]));
        bvals_grav(&(Mesh.Domain[nl][nd]));
      }
    }
  }
#endif
#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
  integrate_diff_init(&Mesh);
#endif

/*--- Step 8. ----------------------------------------------------------------*/
/* Setup complete, output initial conditions */

  if(out_level >= 0){
    fp = athout_fp();
    par_dump(0,fp);      /* Dump a copy of the parsed information to athout */
  }
  change_rundir(rundir); /* Change to run directory */
  ath_sig_init();        /* Install a signal handler */
  for (nl=1; nl<(Mesh.NLevels); nl++){
    sprintf(level_dir,"lev%d",nl);
    mkdir(level_dir, 0775); /* Create directories for levels > 0 */
  }

  /* Start the wall-clock and CPU-time brackets for the main loop */
  gettimeofday(&tvs,NULL);
  if((have_times = times(&tbuf)) > 0)
    time0 = tbuf.tms_utime + tbuf.tms_stime;
  else
    time0 = clock();

/* Force output of everything (by passing last argument of data_output = 1) */

  if (ires==0) data_output(&Mesh, 1);

  ath_pout(0,"\nSetup complete, entering main loop...\n\n");
  ath_pout(0,"cycle=%i time=%e next dt=%e\n",Mesh.nstep, Mesh.time, Mesh.dt);

/*--- Step 9. ----------------------------------------------------------------*/
/* START OF MAIN INTEGRATION LOOP ==============================================
 * Steps are: (a) Check for data output
 *            (b) Add explicit diffusion with operator splitting
 *            (c) Integrate all Grids over Mesh hierarchy
 *            (d) Restrict solution and correct fine/coarse boundaries
 *            (e) Userwork
 *            (f) Self-gravity
 *            (g) Update time, set new timestep
 *            (h) Set boundary values
 *            (i) check for stopping criteria
 */

  while (Mesh.time < tlim && (nlim < 0 || Mesh.nstep < nlim)) {

/*--- Step 9a. ---------------------------------------------------------------*/
/* Only write outputs with t_out>t (last argument of data_output = 0) */

    data_output(&Mesh, 0);

/*--- Step 9b. ---------------------------------------------------------------*/
/* operator-split explicit diffusion: thermal conduction, viscosity, resistivity
 * Done first since CFL constraint is applied which may change dt  */

#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
    integrate_diff(&Mesh);
    for (nl=0; nl<(Mesh.NLevels); nl++){ 
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
        if (Mesh.Domain[nl][nd].Grid != NULL){
          bvals_mhd(&(Mesh.Domain[nl][nd]));
        }
      }
    }
#endif

/*--- Step 9c. ---------------------------------------------------------------*/
/* Loop over all Domains and call Integrator */

    for (nl=0; nl<(Mesh.NLevels); nl++){ 
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
        if (Mesh.Domain[nl][nd].Grid != NULL){
          (*Integrate)(&(Mesh.Domain[nl][nd]));

#ifdef FARGO
          Fargo(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
          /* NOTE(review): level0_Grid/level0_Domain are not declared anywhere
           * in this file; this looks like a leftover of the pre-v4.0 API and
           * would not compile with FARGO+PARTICLES enabled -- confirm. */
          advect_particles(&level0_Grid, &level0_Domain);
#endif
#endif /* FARGO */
        }
      }
    }

/*--- Step 9d. ---------------------------------------------------------------*/
/* With SMR, restrict solution from Child --> Parent grids  */

#ifdef STATIC_MESH_REFINEMENT
    RestrictCorrect(&Mesh);
#endif

/*--- Step 9e. ---------------------------------------------------------------*/
/* User work (defined in problem()) */

    Userwork_in_loop(&Mesh);

/*--- Step 9f. ---------------------------------------------------------------*/
/* Compute gravitational potential using new density, and add second-order
 * correction to fluxes for accelerations due to self-gravity. */

#ifdef SELF_GRAVITY
    for (nl=0; nl<(Mesh.NLevels); nl++){ 
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
        if (Mesh.Domain[nl][nd].Grid != NULL){
          (*SelfGrav)(&(Mesh.Domain[nl][nd]));
          bvals_grav(&(Mesh.Domain[nl][nd]));
          selfg_fc(&(Mesh.Domain[nl][nd]));
        }
      }
    }
#endif

/*--- Step 9g. ---------------------------------------------------------------*/
/* Update Mesh time, and time in all Grid's.  Compute new dt */

    Mesh.nstep++;
    Mesh.time += Mesh.dt;
    for (nl=0; nl<(Mesh.NLevels); nl++){
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){
        if (Mesh.Domain[nl][nd].Grid != NULL){
          Mesh.Domain[nl][nd].Grid->time = Mesh.time;
        }
      }
    }

    dt_done = Mesh.dt;
    new_dt(&Mesh);

/*--- Step 9h. ---------------------------------------------------------------*/
/* Boundary values must be set after time is updated for t-dependent BCs.
 * With SMR, ghost zones at internal fine/coarse boundaries set by Prolongate */

    for (nl=0; nl<(Mesh.NLevels); nl++){ 
      for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
        if (Mesh.Domain[nl][nd].Grid != NULL){
          bvals_mhd(&(Mesh.Domain[nl][nd]));
#ifdef PARTICLES
          /* NOTE(review): same stale level0_Grid/level0_Domain API as in
           * Step 9c; compare the Domain-based call used in Step 6 -- confirm. */
          bvals_particle(&level0_Grid, &level0_Domain);
#endif
        }
      }
    }

#ifdef STATIC_MESH_REFINEMENT
    Prolongate(&Mesh);
#endif

/*--- Step 9i. ---------------------------------------------------------------*/
/* Force quit if wall time limit reached.  Check signals from system */

#ifdef MPI_PARALLEL
    if(use_wtlim && (MPI_Wtime() > wtend))
      iquit = 103; /* an arbitrary, unused signal number */
#endif /* MPI_PARALLEL */
    if(ath_sig_act(&iquit) != 0) break;

/* Print diagnostic message, flush message buffers, and continue... */

    ath_pout(0,"cycle=%i time=%e next dt=%e last dt=%e\n",
             Mesh.nstep,Mesh.time,Mesh.dt,dt_done);

    if(nflush == Mesh.nstep){
      ath_flush_out();
      ath_flush_err();
      nflush += iflush;
    }
  } /* END OF MAIN INTEGRATION LOOP ==========================================*/

/*--- Step 10. ---------------------------------------------------------------*/
/* Finish up by computing zc/sec, dumping data, and deallocate memory */

/* Print diagnostic message as to why run terminated */

  if (Mesh.nstep == nlim)
    ath_pout(0,"\nterminating on cycle limit\n");
#ifdef MPI_PARALLEL
  else if(use_wtlim && iquit == 103)
    ath_pout(0,"\nterminating on wall-time limit\n");
#endif /* MPI_PARALLEL */
  else
    ath_pout(0,"\nterminating on time limit\n");

/* Get time used */

  gettimeofday(&tve,NULL);
  if(have_times > 0) {
    times(&tbuf);
    time1 = tbuf.tms_utime + tbuf.tms_stime;
    /* Clamp to 1 tick so the zone-cycle rates below never divide by zero */
    cpu_time = (time1 > time0 ? (double)(time1 - time0) : 1.0)/
      (double)clk_tck;
  } else {
    time1 = clock();
    cpu_time = (time1 > time0 ? (double)(time1 - time0) : 1.0)/
      (double)CLOCKS_PER_SEC;
  }

/* Calculate and print the zone-cycles/cpu-second on this processor */

  zones = 0;
  for (nl=0; nl<(Mesh.NLevels); nl++){ 
  for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
    if (Mesh.Domain[nl][nd].Grid != NULL) {
      zones += (Mesh.Domain[nl][nd].Grid->Nx[0])*
               (Mesh.Domain[nl][nd].Grid->Nx[1])*
               (Mesh.Domain[nl][nd].Grid->Nx[2]);
    }
  }}
  zcs = (double)zones*(double)((Mesh.nstep) - nstep_start)/cpu_time;

  ath_pout(0,"  tlim= %e   nlim= %i\n",tlim,nlim);
  ath_pout(0,"  time= %e  cycle= %i\n",Mesh.time,Mesh.nstep);
  ath_pout(0,"\nzone-cycles/cpu-second = %e\n",zcs);

/* Calculate and print the zone-cycles/wall-second on this processor */

  cpu_time = (double)(tve.tv_sec - tvs.tv_sec) +
    1.0e-6*(double)(tve.tv_usec - tvs.tv_usec);
  zcs = (double)zones*(double)((Mesh.nstep) - nstep_start)/cpu_time;
  ath_pout(0,"\nelapsed wall time = %e sec.\n",cpu_time);
  ath_pout(0,"\nzone-cycles/wall-second = %e\n",zcs);

/* Calculate and print total zone-cycles/wall-second on all processors */
#ifdef MPI_PARALLEL
  /* Domain Nx[] counts span all processors, so no reduction is needed */
  zones = 0;
  for (nl=0; nl<(Mesh.NLevels); nl++){ 
  for (nd=0; nd<(Mesh.DomainsPerLevel[nl]); nd++){  
    zones += (Mesh.Domain[nl][nd].Nx[0])*
               (Mesh.Domain[nl][nd].Nx[1])*
               (Mesh.Domain[nl][nd].Nx[2]);
  }}
  zcs = (double)zones*(double)(Mesh.nstep - nstep_start)/cpu_time;
  ath_pout(0,"\ntotal zone-cycles/wall-second = %e\n",zcs);
#endif /* MPI_PARALLEL */

/* complete any final User work */

  Userwork_after_loop(&Mesh);

/* Final output everything (last argument of data_output = 1) */

  data_output(&Mesh, 1);

/* Free all memory */

  lr_states_destruct();
  integrate_destruct();
  data_output_destruct();
#ifdef PARTICLES
  /* NOTE(review): stale level0_Grid API again (see Step 9c) -- confirm these
   * compile when PARTICLES is enabled. */
  particle_destruct(&level0_Grid);
  bvals_particle_destruct(&level0_Grid);
#endif
#if defined(SHEARING_BOX) || (defined(FARGO) && defined(CYLINDRICAL))
  bvals_shear_destruct();
#endif
#if defined(RESISTIVITY) || defined(VISCOSITY) || defined(THERMAL_CONDUCTION)
  integrate_diff_destruct();
#endif
  par_close();

#ifdef MPI_PARALLEL
  MPI_Finalize();
#endif /* MPI_PARALLEL */

  if(time(&stop)>0) /* current calendar time (UTC) is available */
    ath_pout(0,"\nSimulation terminated on %s",ctime(&stop));

  ath_log_close(); /* close the simulation log files */

  return EXIT_SUCCESS;
}
00716 
00717 /*============================================================================*/
00718 /*----------------------------------------------------------------------------*/
00719 /*! \fn void change_rundir(const char *name) 
00720  *  \brief Change run directory;  create it if it does not exist yet
00721  */
00722 
/* Declared 'static' to match the file-scope prototype above; the definition
 * previously omitted the storage class, which is inconsistent (the earlier
 * static declaration already forces internal linkage, and mixing the two
 * draws warnings from some compilers). */
static void change_rundir(const char *name)
{
#ifdef MPI_PARALLEL

  int err=0;                /* this rank's mkdir/chdir failure flag */
  int rerr, gerr, my_id, status;
  char mydir[80];           /* per-process subdirectory, "id<rank>" */

  status = MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
  if(status != MPI_SUCCESS)
    ath_error("[change_rundir]: MPI_Comm_rank error = %d\n",status);

  if(name != NULL && *name != '\0'){

    /* Only rank 0 creates the shared run directory */
    if(my_id == 0)
      mkdir(name, 0775); /* May return an error, e.g. the directory exists */

    MPI_Barrier(MPI_COMM_WORLD); /* Wait for rank 0 to mkdir() */

    /* Throttle concurrent chdir() calls to MAX_FILE_OP at a time */
    baton_start(MAX_FILE_OP, ch_rundir0_tag);

    if(chdir(name)){
      ath_perr(-1,"[change_rundir]: Cannot change directory to \"%s\"\n",name);
      err = 1;
    }

    baton_stop(MAX_FILE_OP, ch_rundir0_tag);

    /* Did anyone fail to make and change to the run directory? */
    rerr = MPI_Allreduce(&err, &gerr, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    if(rerr) ath_perr(-1,"[change_rundir]: MPI_Allreduce error = %d\n",rerr);

    if(rerr || gerr){
      MPI_Abort(MPI_COMM_WORLD, 1);
      exit(EXIT_FAILURE);
    }
  }

/* Next, change to the local run directory */

  sprintf(mydir, "id%d", my_id);

  baton_start(MAX_FILE_OP, ch_rundir1_tag);

  mkdir(mydir, 0775); /* May return an error, e.g. the directory exists */
  if(chdir(mydir)){
    ath_perr(-1,"[change_rundir]: Cannot change directory to \"%s\"\n",mydir);
    err = 1;
  }

  baton_stop(MAX_FILE_OP, ch_rundir1_tag);

  /* Did anyone fail to make and change to the local run directory? */
  rerr = MPI_Allreduce(&err, &gerr, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  if(rerr) ath_perr(-1,"[change_rundir]: MPI_Allreduce error = %d\n",rerr);

  if(rerr || gerr){
    MPI_Abort(MPI_COMM_WORLD, 1);
    exit(EXIT_FAILURE);
  }

#else /* Serial job */

  /* NULL or empty name means "stay in the current directory" */
  if(name == NULL || *name == '\0') return;

  mkdir(name, 0775); /* May return an error, e.g. the directory exists */
  if(chdir(name))
    ath_error("[change_rundir]: Cannot change directory to \"%s\"\n",name);

#endif /* MPI_PARALLEL */

  return;
}
00796 
00797 /*----------------------------------------------------------------------------*/
00798 /*! \fn static void usage(const char *prog)
00799  *  \brief Outputs help
00800  *
00801  *    athena_version is hardwired at beginning of this file
00802  *    CONFIGURE_DATE is macro set when configure script runs
00803  */
00804 
00805 static void usage(const char *prog)
00806 {
00807   ath_perr(-1,"Athena %s\n",athena_version);
00808   ath_perr(-1,"  Last configure: %s\n",CONFIGURE_DATE);
00809   ath_perr(-1,"\nUsage: %s [options] [block/par=value ...]\n",prog);
00810   ath_perr(-1,"\nOptions:\n");
00811   ath_perr(-1,"  -i <file>       Alternate input file [athinput]\n");
00812   ath_perr(-1,"  -r <file>       Restart a simulation with this file\n");
00813   ath_perr(-1,"  -d <directory>  Alternate run dir [current dir]\n");
00814   ath_perr(-1,"  -h              This Help, and configuration settings\n");
00815   ath_perr(-1,"  -n              Parse input, but don't run program\n");
00816   ath_perr(-1,"  -c              Show Configuration details and quit\n");
00817   ath_perr(-1,"  -t hh:mm:ss     With MPI, wall time limit for final output\n");
00818   show_config();
00819   exit(0);
00820 }

Generated on Mon Sep 27 2010 23:03:07 for Athena by  doxygen 1.7.1