! Actual source code: ex4f.F90
!
! This introductory example illustrates running PETSc on a subset
! of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscsys
  implicit none

  PetscErrorCode ierr
  ! MPI call arguments must use MPI-compatible integer kinds, hence PetscMPIInt
  PetscMPIInt, parameter :: zero = 0, two = 2
  PetscMPIInt rank, size, grank
  PetscReal globalrank

  ! We must call MPI_Init() first, making us, not PETSc, responsible for MPI
  PetscCallMPIA(MPI_Init(ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif

  ! Change the communicator universe for PETSc before initializing it:
  ! split MPI_COMM_WORLD by rank parity so PETSc runs on each half
  ! independently (PetscInitialize() below picks up PETSC_COMM_WORLD)
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))

  ! Every PETSc routine should begin with the PetscInitialize() routine
  PetscCallA(PetscInitializeNoArguments(ierr))

  ! The following MPI calls return the number of processes being used
  ! and the rank of this process in the (split) group
  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

  ! Here we would like to print only one message that represents all
  ! the processes in the group. Sleep proportionally to the global rank so
  ! that IO from different ranks doesn't get mixed up. Note this is not
  ! an ideal solution
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  ! Inline format literal replaces the legacy labeled format statement;
  ! the output text is byte-identical to the original
  if (rank == 0) write (6, "('No of Procs = ', i4, ' rank = ', i4)") size, rank

  ! Always call PetscFinalize() before exiting a program. This routine
  ! finalizes the PETSc libraries and provides summary and diagnostic
  ! information if certain runtime options are chosen (e.g., -log_view).
  ! See the PetscFinalize() manual page for more information
  PetscCallA(PetscFinalize(ierr))
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

  ! Since we initialized MPI, we must call MPI_Finalize() ourselves
  PetscCallMPIA(MPI_Finalize(ierr))
end program main
!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/