Actual source code: ex4f.F90

!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscmpi  ! or mpi or mpi_f08
  use petscsys
  implicit none
  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible for MPI

  PetscCallMPIA(MPI_Init(ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif
!     We can now change the communicator universe for PETSc

  zero = 0
  two = 2
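!     Split MPI_COMM_WORLD by rank parity: even-ranked processes form
!     one subcommunicator and odd-ranked processes another; each group
!     becomes PETSC_COMM_WORLD for its members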
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))

!     Every PETSc program should begin with PetscInitialize(); the
!     no-arguments variant is used here since we do not hand the
!     command line to PETSc
  PetscCallA(PetscInitializeNoArguments(ierr))
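!     From this point on PETSc runs independently within each
!     subcommunicator: collective PETSc operations involve only the
!     processes of this group's PETSC_COMM_WORLD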

!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group.

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))
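!     Note that size and rank describe the subcommunicator: run on 5
!     processes, as in the test below, the even group {0,2,4} has size 3
!     and the odd group {1,3} has size 2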

!     Here we would like to print only one message that represents all
!     the processes in the group. We sleep for globalrank seconds so
!     that output from different ranks does not get interleaved; note
!     this is not an ideal solution
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)
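!     Rank 0 of each subcommunicator prints one line, so two lines
!     appear in total; the test below sorts the output since its
!     arrival order is not guaranteed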

!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  Since we called MPI_Init() ourselves,
!     PetscFinalize() does not call MPI_Finalize() here.  See the
!     PetscFinalize() manpage for more information.

  PetscCallA(PetscFinalize(ierr))
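!     We must also free the subcommunicator created by MPI_Comm_split()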
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/
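!
! Example usage, assuming a standard mpiexec launcher (the exact
! command depends on the MPI installation):
!     mpiexec -n 5 ./ex4f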