Actual source code: ex4f.F90

!
!     This introductory example illustrates running PETSc on a subset
!     of processes.
!
! -----------------------------------------------------------------------

program main
#include <petsc/finclude/petscsys.h>
  use petscmpi  ! or mpi or mpi_f08
  use petscsys
  implicit none
  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

!     We must call MPI_Init() ourselves, making us, not PETSc,
!     responsible for initializing and finalizing MPI.

  PetscCallMPIA(MPI_Init(ierr))
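
!     PetscCallMPIA() and PetscCallA() check the returned error code and
!     abort on failure; the "A" (abort) variants are intended for use in
!     main programs, where errors cannot be propagated to a caller.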
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif
!     We can now change the communicator on which PETSc operates
!     (PETSC_COMM_WORLD) to a subset of MPI_COMM_WORLD.

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))
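
!     The color mod(rank, two) places even- and odd-numbered global
!     ranks into two disjoint communicators, and the key (zero) keeps
!     the original rank ordering within each. With 5 processes, for
!     example, global ranks {0,2,4} form one PETSC_COMM_WORLD and
!     {1,3} the other.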

!     Every PETSc program should begin by calling PetscInitialize();
!     here PetscInitializeNoArguments() is used since no command-line
!     arguments are processed.
  PetscCallA(PetscInitializeNoArguments(ierr))
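
!     Note that PETSC_COMM_WORLD must be assigned before
!     PetscInitialize() is called; it cannot be changed afterwards.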

!     The following MPI calls return the number of processes in
!     PETSC_COMM_WORLD and the rank of this process within it.

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))
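
!     size and rank now describe this process's subgroup rather than
!     MPI_COMM_WORLD: with 5 processes, size is 3 in the even group and
!     2 in the odd group.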

!     Here we would like to print only one message per group. Each
!     process sleeps for a number of seconds equal to its global rank
!     so that output from different ranks does not get interleaved.
!     Note that this is not an ideal solution.
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)
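
!     Since rank is the rank within PETSC_COMM_WORLD, the first process
!     of each subgroup prints one line; with 5 processes, two lines
!     appear in total.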

!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  Because we initialized MPI ourselves,
!     PetscFinalize() does not finalize MPI here.  See the
!     PetscFinalize() manpage for more information.

  PetscCallA(PetscFinalize(ierr))
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
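
!     PETSC_COMM_WORLD was created by our MPI_Comm_split() above, so we
!     must free it ourselves once PETSc is finished with it.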
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end
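
!     A typical way to build and run this example (the make target and
!     executable name are illustrative and depend on the local PETSc
!     installation):
!
!        make ex4f
!        mpiexec -n 5 ./ex4f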

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/
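
!     For the PETSc test harness: nsize sets the number of MPI ranks,
!     the sort filters make the comparison with the expected output
!     independent of line ordering, and requires skips the test on
!     builds configured with CUDA or SAWs.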