Actual source code: ex4f.F90
!
! This introductory example illustrates running PETSc on a subset
! of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscsys
  implicit none
  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

  ! We must call MPI_Init() first, making us, not PETSc, responsible for MPI

  PetscCallMPIA(MPI_Init(ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif
  ! We can now change the communicator universe for PETSc

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))
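  ! The "color" argument mod(rank, two) places even-ranked processes in one
  ! subcommunicator and odd-ranked processes in another; the "key" of zero
  ! preserves the original relative rank order within each new communicator.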

  ! Every PETSc program should begin by calling PetscInitialize();
  ! here the no-argument variant is used.
  PetscCallA(PetscInitializeNoArguments(ierr))
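  ! Because PETSC_COMM_WORLD was overwritten before PetscInitialize(),
  ! PETSc now runs independently on each half of MPI_COMM_WORLD.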

  ! The following MPI calls return the number of processes being used
  ! and the rank of this process in the group.

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

  ! Here we would like to print only one message that represents all
  ! the processes in the group. Sleep so that IO from different ranks
  ! doesn't get mixed up. Note that this is not an ideal solution.
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
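  ! PetscSleep() takes the delay in seconds; using the global rank as the
  ! delay staggers the processes in global-rank order before they write.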
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)
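  ! Only rank 0 of each subcommunicator writes, so one line appears per
  ! group. A sketch of an alternative (assuming a character buffer msg were
  ! declared with the other locals): PetscPrintf() prints only from the
  ! first rank of its communicator, avoiding the explicit rank test:
  !   write (msg, 100) size, rank
  !   PetscCallA(PetscPrintf(PETSC_COMM_WORLD, msg, ierr))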

  ! Always call PetscFinalize() before exiting a program. This routine
  ! finalizes the PETSc libraries and provides summary and diagnostic
  ! information if certain runtime options are chosen (e.g., -log_view).
  ! Because we called MPI_Init() ourselves, PetscFinalize() will not
  ! finalize MPI for us. See the PetscFinalize() manpage for more
  ! information.

  PetscCallA(PetscFinalize(ierr))
  ! Free the subcommunicator created above with MPI_Comm_split()
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

  ! Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/
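
! One possible way to run this example (hypothetical executable name;
! the test above uses 5 MPI ranks):
!   mpiexec -n 5 ./ex4f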