Actual source code: mpiu.c

#include <petscsys.h>
#include <petsc/private/petscimpl.h>
/*
    Note that a tag of 0 is OK because comm is a private communicator,
  generated below just for these routines.
*/
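
/*
   How the token passing below works: at Begin, each rank except 0 first waits for a
   zero-length message from rank-1 and then, unless it is the last member of its group
   of ng consecutive ranks (or the last rank overall), immediately forwards the token
   to rank+1, so all ng ranks of a group run concurrently while later groups wait.
   At End, the last member of each group releases the first member of the next group,
   and rank 0 receives from the last rank to close the cycle.
*/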

PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm, int ng)
{
  PetscMPIInt rank, size, tag = 0;
  MPI_Status  status;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Wait for the token from the previous rank */
  if (rank) PetscCallMPI(MPI_Recv(NULL, 0, MPI_INT, rank - 1, tag, comm, &status));
  /* Send to the next process unless we are the last process in the group (or the last rank overall) */
  if ((rank % ng) < ng - 1 && rank != size - 1) PetscCallMPI(MPI_Send(NULL, 0, MPI_INT, rank + 1, tag, comm));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm, int ng)
{
  PetscMPIInt rank, size, tag = 0;
  MPI_Status  status;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

  /* Send to the first process in the next group */
  if ((rank % ng) == ng - 1 || rank == size - 1) PetscCallMPI(MPI_Send(NULL, 0, MPI_INT, (rank + 1) % size, tag, comm));
  if (rank == 0) PetscCallMPI(MPI_Recv(NULL, 0, MPI_INT, size - 1, tag, comm, &status));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
    The variable Petsc_Seq_keyval identifies the MPI attribute, attached to the
  user's communicator, that caches the private communicator used by the
  sequential phase code below.
*/
PetscMPIInt Petsc_Seq_keyval = MPI_KEYVAL_INVALID;

/*@
  PetscSequentialPhaseBegin - Begins a sequential section of code.

  Collective

  Input Parameters:
+ comm - Communicator to sequentialize over
- ng   - Number of processes per group.  This many processes are allowed to execute
   at the same time (usually 1)

  Level: intermediate

  Notes:
  `PetscSequentialPhaseBegin()` and `PetscSequentialPhaseEnd()` provide a
  way to force a section of code to be executed by the processes in
  rank order.  Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

  To ensure that output from multiple MPI ranks is properly ordered, use `PetscSynchronizedPrintf()` rather than these routines.

.seealso: `PetscSequentialPhaseEnd()`, `PetscSynchronizedPrintf()`
@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm, int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm, *addr_local_comm;

  PetscFunctionBegin;
  PetscCall(PetscSysInitializePackage());
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Seq_keyval, NULL));

  PetscCallMPI(MPI_Comm_dup(comm, &local_comm));
  PetscCall(PetscMalloc1(1, &addr_local_comm));

  *addr_local_comm = local_comm;

  PetscCallMPI(MPI_Comm_set_attr(comm, Petsc_Seq_keyval, (void *)addr_local_comm));
  PetscCall(PetscSequentialPhaseBegin_Private(local_comm, ng));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PetscSequentialPhaseEnd - Ends a sequential section of code.

  Collective

  Input Parameters:
+ comm - Communicator to sequentialize over
- ng   - Number of processes per group.  This many processes are allowed to execute
   at the same time (usually 1)

  Level: intermediate

  Note:
  See `PetscSequentialPhaseBegin()` for more details.

.seealso: `PetscSequentialPhaseBegin()`
@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm, int ng)
{
  PetscMPIInt size, iflg;
  MPI_Comm    local_comm, *addr_local_comm;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);

  PetscCallMPI(MPI_Comm_get_attr(comm, Petsc_Seq_keyval, (void **)&addr_local_comm, &iflg));
  PetscCheck(iflg, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  PetscCall(PetscSequentialPhaseEnd_Private(local_comm, ng));

  PetscCall(PetscFree(addr_local_comm));
  PetscCallMPI(MPI_Comm_free(&local_comm));
  PetscCallMPI(MPI_Comm_delete_attr(comm, Petsc_Seq_keyval));
  PetscFunctionReturn(PETSC_SUCCESS);
}
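
/*
   Illustrative usage sketch (not part of this file; the file name "shared.log" is
   hypothetical): each rank in turn appends a line to a shared file, a task that
   must not run concurrently.

     #include <petscsys.h>
     #include <stdio.h>

     int main(int argc, char **argv)
     {
       PetscMPIInt rank;
       FILE       *fp;

       PetscFunctionBeginUser;
       PetscCall(PetscInitialize(&argc, &argv, NULL, NULL));
       PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
       PetscCall(PetscSequentialPhaseBegin(PETSC_COMM_WORLD, 1)); // one rank at a time from here
       fp = fopen("shared.log", "a");
       if (fp) {
         fprintf(fp, "rank %d was here\n", rank);
         fclose(fp);
       }
       PetscCall(PetscSequentialPhaseEnd(PETSC_COMM_WORLD, 1)); // release the next rank
       PetscCall(PetscFinalize());
       return 0;
     }
*/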

/*@
  PetscGlobalMinMaxInt - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator to reduce with
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.seealso: `PetscSplitOwnership()`, `PetscGlobalMinMaxReal()`
@*/
PetscErrorCode PetscGlobalMinMaxInt(MPI_Comm comm, const PetscInt minMaxVal[2], PetscInt minMaxValGlobal[2])
{
  PetscInt  sendbuf[3], recvbuf[3];
  PetscBool hasminint = (PetscBool)(minMaxVal[0] == PETSC_INT_MIN);

  PetscFunctionBegin;
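  /* Reduce the min and the max in a single MPI_MAX reduction: since min(x) = -max(-x),
     send the negated local minimum alongside the local maximum. The third entry flags
     ranks whose minimum is PETSC_INT_MIN, which cannot be negated without overflow. */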
  sendbuf[0] = hasminint ? PETSC_INT_MIN : -minMaxVal[0]; /* note that -PETSC_INT_MIN equals PETSC_INT_MIN; the ternary suppresses sanitizer warnings */
  sendbuf[1] = minMaxVal[1];
  sendbuf[2] = hasminint ? 1 : 0; /* is minMaxVal[0] equal to PETSC_INT_MIN? */
  PetscCallMPI(MPIU_Allreduce(sendbuf, recvbuf, 3, MPIU_INT, MPI_MAX, comm));
  minMaxValGlobal[0] = recvbuf[2] ? PETSC_INT_MIN : -recvbuf[0];
  minMaxValGlobal[1] = recvbuf[1];
  PetscFunctionReturn(PETSC_SUCCESS);
}
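
/*
   Illustrative usage sketch (hypothetical, not part of this file): compute the local
   extrema of a small array and reduce them to global extrema in one call.

     PetscInt vals[3] = {3, -7, 12};  // hypothetical per-rank data
     PetscInt i, lminmax[2], gminmax[2];

     lminmax[0] = lminmax[1] = vals[0];
     for (i = 1; i < 3; i++) {
       if (vals[i] < lminmax[0]) lminmax[0] = vals[i];
       if (vals[i] > lminmax[1]) lminmax[1] = vals[i];
     }
     PetscCall(PetscGlobalMinMaxInt(PETSC_COMM_WORLD, lminmax, gminmax));
     // gminmax[0] is the global minimum, gminmax[1] the global maximum
*/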

/*@
  PetscGlobalMinMaxReal - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator to reduce with
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.seealso: `PetscSplitOwnership()`, `PetscGlobalMinMaxInt()`
@*/
PetscErrorCode PetscGlobalMinMaxReal(MPI_Comm comm, const PetscReal minMaxVal[2], PetscReal minMaxValGlobal[2])
{
  PetscReal sendbuf[2];

  PetscFunctionBegin;
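  /* Same trick as in PetscGlobalMinMaxInt(): min(x) = -max(-x), so a single MPIU_MAX
     reduction returns both extrema; no overflow guard is needed for reals. */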
  sendbuf[0] = -minMaxVal[0];
  sendbuf[1] = minMaxVal[1];
  PetscCallMPI(MPIU_Allreduce(sendbuf, minMaxValGlobal, 2, MPIU_REAL, MPIU_MAX, comm));
  minMaxValGlobal[0] = -minMaxValGlobal[0];
  PetscFunctionReturn(PETSC_SUCCESS);
}
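
/*
   Illustrative usage sketch (hypothetical, not part of this file): the global range
   of one value per rank.

     PetscMPIInt rank;
     PetscReal   lminmax[2], gminmax[2];

     PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
     lminmax[0] = lminmax[1] = (PetscReal)rank;  // hypothetical per-rank value
     PetscCall(PetscGlobalMinMaxReal(PETSC_COMM_WORLD, lminmax, gminmax));
     // gminmax[0] == 0.0 and gminmax[1] == size-1 on PETSC_COMM_WORLD
*/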