Actual source code: pinit.c

  1: #define PETSC_DESIRE_FEATURE_TEST_MACROS
  2: /*
  3:    This file defines the initialization of PETSc, including PetscInitialize()
  4: */
  5: #include <petsc/private/petscimpl.h>
  6: #include <petsc/private/logimpl.h>
  7: #include <petscviewer.h>
  8: #include <petsc/private/garbagecollector.h>

 10: #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
 11: #include <petsc/private/valgrind/valgrind.h>
 12: #endif

 14: #if defined(PETSC_USE_FORTRAN_BINDINGS)
 15: #include <petsc/private/ftnimpl.h>
 16: #endif

 18: #if PetscDefined(USE_COVERAGE)
 19: EXTERN_C_BEGIN
 20:   #if defined(PETSC_HAVE___GCOV_DUMP)
 21:     #define __gcov_flush __gcov_dump
 22:   #endif
 23: void __gcov_flush(void);
 24: EXTERN_C_END
 25: #endif

 27: #if defined(PETSC_SERIALIZE_FUNCTIONS)
 28: PETSC_INTERN PetscFPT PetscFPTData;
 29: PetscFPT              PetscFPTData = 0;
 30: #endif

 32: #if PetscDefined(HAVE_SAWS)
 33: #include <petscviewersaws.h>
 34: #endif

 36: PETSC_INTERN FILE *petsc_history;

 38: PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
 39: PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
 40: PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
 41: PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
 42: PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);

 44: /* user may set these BEFORE calling PetscInitialize() */
 45: MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
 46: #if PetscDefined(HAVE_MPI_INIT_THREAD)
 47: PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
 48: #else
 49: PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
 50: #endif

 52: PetscMPIInt Petsc_Counter_keyval      = MPI_KEYVAL_INVALID;
 53: PetscMPIInt Petsc_InnerComm_keyval    = MPI_KEYVAL_INVALID;
 54: PetscMPIInt Petsc_OuterComm_keyval    = MPI_KEYVAL_INVALID;
 55: PetscMPIInt Petsc_ShmComm_keyval      = MPI_KEYVAL_INVALID;
 56: PetscMPIInt Petsc_CreationIdx_keyval  = MPI_KEYVAL_INVALID;
 57: PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;

 59: PetscMPIInt Petsc_SharedWD_keyval  = MPI_KEYVAL_INVALID;
 60: PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;

 62: /*
 63:      Declare and set all the string names of the PETSc enums
 64: */
 65: const char *const PetscBools[]     = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
 66: const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};

 68: PetscBool PetscPreLoadingUsed = PETSC_FALSE;
 69: PetscBool PetscPreLoadingOn   = PETSC_FALSE;

 71: PetscInt PetscHotRegionDepth;

 73: PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;

 75: #if defined(PETSC_HAVE_THREADSAFETY)
 76: PetscSpinlock PetscViewerASCIISpinLockOpen;
 77: PetscSpinlock PetscViewerASCIISpinLockStdout;
 78: PetscSpinlock PetscViewerASCIISpinLockStderr;
 79: PetscSpinlock PetscCommSpinLock;
 80: #endif

 82: extern PetscInt PetscNumBLASThreads;

 84: /*@C
 85:   PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args

 87:   Collective, No Fortran Support

 89:   Input Parameters:
 90: + argc     - number of args
 91: . args     - array of command line arguments
 92: . filename - optional name of the program file, pass `NULL` to ignore
 93: - help     - optional help, pass `NULL` to ignore

 95:   Level: advanced

 97:   Notes:
  98:   This is called only by the PETSc Julia interface. Even though it might start MPI, it sets the flag to
  99:   indicate that it did NOT start MPI so that `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
100:   be called multiple times from Julia without the problem of trying to initialize MPI more than once.

102:   Developer Notes:
103:   Turns off PETSc signal handling to allow Julia to manage signals

105: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
106: */
107: PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
108: {
109:   int    myargc = argc;
110:   char **myargs = args;

112:   PetscFunctionBegin;
113:   PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
114:   PetscCall(PetscPopSignalHandler());
115:   PetscBeganMPI = PETSC_FALSE;
116:   PetscFunctionReturn(PETSC_SUCCESS);
117: }
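
/*
   Example: an illustrative sketch (not taken from the Julia interface itself) of how a
   binding layer might call PetscInitializeNoPointers() with a fabricated argument vector;
   the program name and option shown are hypothetical.

     char *fakeargs[] = {(char *)"julia-petsc", (char *)"-options_left", NULL};

     PetscCall(PetscInitializeNoPointers(2, fakeargs, NULL, NULL));
     // ... use PETSc ...
     PetscCall(PetscFinalize()); // does not end MPI, since PetscBeganMPI was reset above
*/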

119: /*@C
120:   PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
121:   the command line arguments.

123:   Collective

125:   Level: advanced

127: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
128: @*/
129: PetscErrorCode PetscInitializeNoArguments(void) PeNS
130: {
131:   int    argc = 0;
132:   char **args = NULL;

134:   PetscFunctionBegin;
135:   PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
136:   PetscFunctionReturn(PETSC_SUCCESS);
137: }
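
/*
   Example: an illustrative sketch of a complete program built around PetscInitializeNoArguments(),
   for the case where no command line options need to be passed to PETSc.

     #include <petscsys.h>

     int main(void)
     {
       PetscCall(PetscInitializeNoArguments());
       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "PETSc is initialized\n"));
       PetscCall(PetscFinalize());
       return 0;
     }
*/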

139: /*@
140:   PetscInitialized - Determine whether PETSc is initialized.

142:   Output Parameter:
143: . isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise

145:   Level: beginner

147: .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
148: @*/
149: PetscErrorCode PetscInitialized(PetscBool *isInitialized)
150: {
151:   PetscFunctionBegin;
152:   if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
153:   *isInitialized = PetscInitializeCalled;
154:   PetscFunctionReturn(PETSC_SUCCESS);
155: }

157: /*@
158:   PetscFinalized - Determine whether `PetscFinalize()` has been called yet

160:   Output Parameter:
161: . isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise

163:   Level: developer

165: .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
166: @*/
167: PetscErrorCode PetscFinalized(PetscBool *isFinalized)
168: {
169:   PetscFunctionBegin;
170:   if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
171:   *isFinalized = PetscFinalizeCalled;
172:   PetscFunctionReturn(PETSC_SUCCESS);
173: }
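
/*
   Example: an illustrative sketch of using PetscInitialized() and PetscFinalized() to make a
   helper routine safe to call at any point in a program's lifetime.

     PetscBool initialized, finalized;

     PetscCall(PetscInitialized(&initialized));
     PetscCall(PetscFinalized(&finalized));
     if (!initialized && !finalized) PetscCall(PetscInitializeNoArguments());
*/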

175: PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);

177: /*
 178:        This function is the MPI reduction operation used to compute the max of the
 179:    first half of the datatype and the sum of the second half.
180: */
181: MPI_Op MPIU_MAXSUM_OP               = 0;
182: MPI_Op Petsc_Garbage_SetIntersectOp = 0;

184: PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
185: {
186:   PetscFunctionBegin;
187:   if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
188: #if defined(PETSC_USE_64BIT_INDICES)
189:     struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
190:     PetscMPIInt                   count = *cnt;

192:     for (PetscMPIInt i = 0; i < count; i++) {
193:       xout[i].a = PetscMax(xout[i].a, xin[i].a);
194:       xout[i].b += xin[i].b;
195:     }
196: #endif
197:   } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
198:     PetscInt   *xin = (PetscInt *)in, *xout = (PetscInt *)out;
199:     PetscMPIInt count = *cnt;

201:     for (PetscMPIInt i = 0; i < count; i++) {
202:       xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
203:       xout[2 * i + 1] += xin[2 * i + 1];
204:     }
205:   } else {
206:     PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
207:     (void)ierr;
208:     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
209:   }
210:   PetscFunctionReturnVoid();
211: }

213: /*@
214:   PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.

216:   Collective

218:   Input Parameters:
219: + comm  - the communicator
 220: - array - an array of length 2 times `size`, the number of MPI processes

222:   Output Parameters:
223: + max - the maximum of `array[2*rank]` over all MPI processes
224: - sum - the sum of the `array[2*rank + 1]` over all MPI processes

226:   Level: developer

228: .seealso: `PetscInitialize()`
229: @*/
230: PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
231: {
232:   PetscFunctionBegin;
233: #if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
234:   {
235:     struct {
236:       PetscInt max, sum;
237:     } work;
238:     PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
239:     *max = work.max;
240:     *sum = work.sum;
241:   }
242: #else
243:   {
244:     PetscMPIInt size, rank;
245:     struct {
246:       PetscInt max, sum;
247:     } *work;
248:     PetscCallMPI(MPI_Comm_size(comm, &size));
249:     PetscCallMPI(MPI_Comm_rank(comm, &rank));
250:     PetscCall(PetscMalloc1(size, &work));
251:     PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
252:     *max = work[rank].max;
253:     *sum = work[rank].sum;
254:     PetscCall(PetscFree(work));
255:   }
256: #endif
257:   PetscFunctionReturn(PETSC_SUCCESS);
258: }
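
/*
   Example: an illustrative sketch of calling PetscMaxSum(), assuming `comm` is an existing
   communicator; each process contributes two entries per destination rank, so every rank
   receives max = size-1 and sum = size.

     PetscInt   *local, gmax, gsum;
     PetscMPIInt size, rank;

     PetscCallMPI(MPI_Comm_size(comm, &size));
     PetscCallMPI(MPI_Comm_rank(comm, &rank));
     PetscCall(PetscMalloc1(2 * size, &local));
     for (PetscMPIInt i = 0; i < size; i++) {
       local[2 * i]     = rank; // contributes to the max received by rank i
       local[2 * i + 1] = 1;    // contributes to the sum received by rank i
     }
     PetscCall(PetscMaxSum(comm, local, &gmax, &gsum));
     PetscCall(PetscFree(local));
*/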

260: #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
261:   #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
262:     #include <quadmath.h>
263:   #endif
264: MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
265:   #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
266: MPI_Op MPIU_SUM = 0;
267:   #endif

269: PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
270: {
271:   PetscMPIInt i, count = *cnt;

273:   PetscFunctionBegin;
274:   if (*datatype == MPIU_REAL) {
275:     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
276:     for (i = 0; i < count; i++) xout[i] += xin[i];
277:   }
278:   #if defined(PETSC_HAVE_COMPLEX)
279:   else if (*datatype == MPIU_COMPLEX) {
280:     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
281:     for (i = 0; i < count; i++) xout[i] += xin[i];
282:   }
283:   #endif
284:   #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
285:   else if (*datatype == MPIU___FLOAT128) {
286:     __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
287:     for (i = 0; i < count; i++) xout[i] += xin[i];
288:     #if defined(PETSC_HAVE_COMPLEX)
289:   } else if (*datatype == MPIU___COMPLEX128) {
290:     __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
291:     for (i = 0; i < count; i++) xout[i] += xin[i];
292:     #endif
293:   }
294:   #endif
295:   #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
296:   else if (*datatype == MPIU___FP16) {
297:     __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
298:     for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
299:   }
300:   #endif
301:   else {
302:   #if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
303:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
304:   #elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
305:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
306:   #elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
307:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
308:   #else
309:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
310:   #endif
311:     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
312:   }
313:   PetscFunctionReturnVoid();
314: }
315: #endif

317: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
318: MPI_Op MPIU_MAX = 0;
319: MPI_Op MPIU_MIN = 0;

321: PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
322: {
323:   PetscInt i, count = *cnt;

325:   PetscFunctionBegin;
326:   if (*datatype == MPIU_REAL) {
327:     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
328:     for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
329:   }
330:   #if defined(PETSC_HAVE_COMPLEX)
331:   else if (*datatype == MPIU_COMPLEX) {
332:     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
333:     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
334:   }
335:   #endif
336:   else {
337:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
338:     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
339:   }
340:   PetscFunctionReturnVoid();
341: }

343: PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
344: {
345:   PetscInt i, count = *cnt;

347:   PetscFunctionBegin;
348:   if (*datatype == MPIU_REAL) {
349:     PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
350:     for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
351:   }
352:   #if defined(PETSC_HAVE_COMPLEX)
353:   else if (*datatype == MPIU_COMPLEX) {
354:     PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
355:     for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
356:   }
357:   #endif
358:   else {
 359:     PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
360:     PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
361:   }
362:   PetscFunctionReturnVoid();
363: }
364: #endif
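
/*
   Example: an illustrative sketch of how the reduction operators above are consumed; user code
   reduces with MPIU_SUM/MPIU_MAX on MPIU_REAL data, and these hand-rolled operations are only
   substituted for the __float128/__fp16 builds (assuming `comm` is a valid communicator).

     PetscReal local = 1.0, global;

     PetscCallMPI(MPIU_Allreduce(&local, &global, 1, MPIU_REAL, MPIU_SUM, comm)); // global sum
     PetscCallMPI(MPIU_Allreduce(&local, &global, 1, MPIU_REAL, MPIU_MAX, comm)); // global max
*/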

366: /*
367:    Private routine to delete internal tag/name counter storage when a communicator is freed.

 369:    This is called by MPI, not by users; it is invoked by MPI_Comm_free() when the communicator that has this data as an attribute is freed.

371:    Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

373: */
374: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
375: {
376:   PetscCommCounter      *counter = (PetscCommCounter *)count_val;
377:   struct PetscCommStash *comms   = counter->comms, *pcomm;

379:   PetscFunctionBegin;
380:   PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
381:   PetscCallReturnMPI(PetscFree(counter->iflags));
382:   while (comms) {
383:     PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
384:     pcomm = comms;
385:     comms = comms->next;
386:     PetscCallReturnMPI(PetscFree(pcomm));
387:   }
388:   PetscCallReturnMPI(PetscFree(counter));
389:   PetscFunctionReturn(MPI_SUCCESS);
390: }
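
/*
   Example: an illustrative sketch of the MPI attribute-caching pattern this callback plugs into;
   PETSc performs the real registration during PetscInitialize() (see further below), so this only
   shows when MPI ends up invoking the delete function.

     PetscMPIInt       keyval;
     PetscCommCounter *counter;
     MPI_Comm          comm;

     PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &keyval, NULL));
     PetscCall(PetscNew(&counter));
     PetscCallMPI(MPI_Comm_dup(MPI_COMM_WORLD, &comm));
     PetscCallMPI(MPI_Comm_set_attr(comm, keyval, counter));
     PetscCallMPI(MPI_Comm_free(&comm)); // MPI now calls Petsc_Counter_Attr_DeleteFn() on counter
*/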

392: /*
 393:   This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr()) or the user
 394:   calling MPI_Comm_free().

396:   This is the only entry point for breaking the links between inner and outer comms.

 398:   This is called by MPI, not by users, when MPI_Comm_free() is called on the communicator.

400:   Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()

402: */
403: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
404: {
405:   union
406:   {
407:     MPI_Comm comm;
408:     void    *ptr;
409:   } icomm;

411:   PetscFunctionBegin;
412:   PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
413:   icomm.ptr = attr_val;
414:   if (PetscDefined(USE_DEBUG)) {
 415:     /* Error out if the inner/outer comms are not correctly linked through their Outer/InnerComm attributes */
416:     PetscMPIInt flg;
417:     union
418:     {
419:       MPI_Comm comm;
420:       void    *ptr;
421:     } ocomm;
422:     PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
423:     PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
424:     PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
425:   }
426:   PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
427:   PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
428:   PetscFunctionReturn(MPI_SUCCESS);
429: }

431: /*
432:  * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr().  It should not be reached any other way.
433:  */
434: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
435: {
436:   PetscFunctionBegin;
437:   PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
438:   PetscFunctionReturn(MPI_SUCCESS);
439: }

441: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);

443: #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
444: PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
445: PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
446: PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
447: #endif

449: PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;

451: PETSC_INTERN int    PetscGlobalArgc;
452: PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
453: int                 PetscGlobalArgc        = 0;
454: char              **PetscGlobalArgs        = NULL;
455: char              **PetscGlobalArgsFortran = NULL;
456: PetscSegBuffer      PetscCitationsList;

458: PetscErrorCode PetscCitationsInitialize(void)
459: {
460:   PetscFunctionBegin;
461:   PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

463:   PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
464:   Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
465:     and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
466:     and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
467:     and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
468:     and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
469:     and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
470:     and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
471:   Title = {{PETSc/TAO} Users Manual},\n\
472:   Number = {ANL-21/39 - Revision 3.23},\n\
473:   Doi = {10.2172/2476320},\n\
474:   Institution = {Argonne National Laboratory},\n\
475:   Year = {2025}\n}\n",
476:                                    NULL));

478:   PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
479:   Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
480:   Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
481:   Booktitle = {Modern Software Tools in Scientific Computing},\n\
482:   Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
483:   Pages = {163--202},\n\
484:   Publisher = {Birkh{\\\"{a}}user Press},\n\
485:   Year = {1997}\n}\n",
486:                                    NULL));
487:   PetscFunctionReturn(PETSC_SUCCESS);
488: }
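
/*
   Example: an illustrative sketch of how user or solver code adds its own entry to the citation
   list initialized above; the BibTeX text and the static guard variable are hypothetical.

     static PetscBool cited = PETSC_FALSE;

     PetscCall(PetscCitationsRegister("@Article{mysolver,\n  Author = {...},\n  Title = {...},\n  Year = {2025}\n}\n", &cited));
*/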

490: static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */

492: PetscErrorCode PetscSetProgramName(const char name[])
493: {
494:   PetscFunctionBegin;
495:   PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
496:   PetscFunctionReturn(PETSC_SUCCESS);
497: }

499: /*@C
500:   PetscGetProgramName - Gets the name of the running program.

502:   Not Collective

504:   Input Parameter:
505: . len - length of the string name

507:   Output Parameter:
508: . name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`

510:   Level: advanced

512: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
513: @*/
514: PetscErrorCode PetscGetProgramName(char name[], size_t len)
515: {
516:   PetscFunctionBegin;
517:   PetscCall(PetscStrncpy(name, programname, len));
518:   PetscFunctionReturn(PETSC_SUCCESS);
519: }
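
/*
   Example: an illustrative sketch of retrieving the program name recorded by PetscSetProgramName()
   during initialization.

     char progname[PETSC_MAX_PATH_LEN];

     PetscCall(PetscGetProgramName(progname, sizeof(progname)));
     PetscCall(PetscInfo(NULL, "Running as %s\n", progname));
*/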

521: /*@C
522:   PetscGetArgs - Allows you to access the raw command line arguments anywhere
523:   after `PetscInitialize()` is called but before `PetscFinalize()`.

525:   Not Collective, No Fortran Support

527:   Output Parameters:
528: + argc - count of the number of command line arguments
529: - args - the command line arguments

531:   Level: intermediate

533:   Notes:
534:   This is usually used to pass the command line arguments into other libraries
535:   that are called internally deep in PETSc or the application.

537:   The first argument contains the program name as is normal for C programs.

539:   See `PetscGetArguments()` for a variant of this routine.

541: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
542: @*/
543: PetscErrorCode PetscGetArgs(int *argc, char ***args)
544: {
545:   PetscFunctionBegin;
546:   PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
547:   *argc = PetscGlobalArgc;
548:   *args = PetscGlobalArgs;
549:   PetscFunctionReturn(PETSC_SUCCESS);
550: }
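
/*
   Example: an illustrative sketch of handing the raw argc/argv obtained from PetscGetArgs() to
   another library's initializer; ThirdPartyLibInit() is a hypothetical placeholder.

     int    argc;
     char **argv;

     PetscCall(PetscGetArgs(&argc, &argv));
     ThirdPartyLibInit(argc, argv); // hypothetical external initializer
*/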

552: /*@C
553:   PetscGetArguments - Allows you to access the command line arguments anywhere
554:   after `PetscInitialize()` is called but before `PetscFinalize()`.

556:   Not Collective, No Fortran Support

558:   Output Parameter:
559: . args - the command line arguments

561:   Level: intermediate

563:   Note:
 564:   This does NOT start with the program name and IS `NULL` terminated (the final entry is `NULL`)

566:   Use `PetscFreeArguments()` to return the memory used by the arguments.

568:   This makes a copy of the arguments and the array of arguments, while `PetscGetArgs()` does not make a copy,
569:   it returns the array of arguments that was passed into the main program.

571: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
572: @*/
573: PetscErrorCode PetscGetArguments(char ***args)
574: {
575:   PetscInt i, argc = PetscGlobalArgc;

577:   PetscFunctionBegin;
578:   PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
579:   if (!argc) {
580:     *args = NULL;
581:     PetscFunctionReturn(PETSC_SUCCESS);
582:   }
583:   PetscCall(PetscMalloc1(argc, args));
584:   for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
585:   (*args)[argc - 1] = NULL;
586:   PetscFunctionReturn(PETSC_SUCCESS);
587: }

589: /*@C
590:   PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`

592:   Not Collective, No Fortran Support

 594:   Input Parameter:
595: . args - the command line arguments

597:   Level: intermediate

599:   Developer Note:
600:   This should be PetscRestoreArguments()

602: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
603: @*/
604: PetscErrorCode PetscFreeArguments(char **args)
605: {
606:   PetscFunctionBegin;
607:   if (args) {
608:     PetscInt i = 0;

610:     while (args[i]) PetscCall(PetscFree(args[i++]));
611:     PetscCall(PetscFree(args));
612:   }
613:   PetscFunctionReturn(PETSC_SUCCESS);
614: }
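
/*
   Example: an illustrative sketch of the PetscGetArguments()/PetscFreeArguments() pair; the copied,
   NULL-terminated list excludes the program name, as described above.

     char **args;

     PetscCall(PetscGetArguments(&args));
     for (PetscInt i = 0; args && args[i]; i++) PetscCall(PetscPrintf(PETSC_COMM_SELF, "argument %" PetscInt_FMT ": %s\n", i, args[i]));
     PetscCall(PetscFreeArguments(args));
*/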

616: #if PetscDefined(HAVE_SAWS)
617:   #include <petscconfiginfo.h>

619: PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
620: {
621:   PetscFunctionBegin;
622:   if (!PetscGlobalRank) {
623:     char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
624:     int       port;
625:     PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
626:     size_t    applinelen, introlen;
627:     char      sawsurl[256];

629:     PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
630:     if (flg) {
631:       char sawslog[PETSC_MAX_PATH_LEN];

633:       PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
634:       if (sawslog[0]) {
635:         PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
636:       } else {
637:         PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
638:       }
639:     }
640:     PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
641:     if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
642:     PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
643:     if (selectport) {
644:       PetscCallSAWs(SAWs_Get_Available_Port, (&port));
645:       PetscCallSAWs(SAWs_Set_Port, (port));
646:     } else {
647:       PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
648:       if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
649:     }
650:     PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
651:     if (flg) {
652:       PetscCallSAWs(SAWs_Set_Document_Root, (root));
653:       PetscCall(PetscStrcmp(root, ".", &rootlocal));
654:     } else {
655:       PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
656:       if (flg) {
657:         PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
658:         PetscCallSAWs(SAWs_Set_Document_Root, (root));
659:       }
660:     }
661:     PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
662:     if (flg2) {
663:       char jsdir[PETSC_MAX_PATH_LEN];
664:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
665:       PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
666:       PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
667:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
668:       PetscCallSAWs(SAWs_Push_Local_Header, ());
669:     }
670:     PetscCall(PetscGetProgramName(programname, sizeof(programname)));
671:     PetscCall(PetscStrlen(help, &applinelen));
672:     introlen = 4096 + applinelen;
673:     applinelen += 1024;
674:     PetscCall(PetscMalloc(applinelen, &appline));
675:     PetscCall(PetscMalloc(introlen, &intro));

677:     if (rootlocal) {
678:       PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
679:       PetscCall(PetscTestFile(appline, 'r', &rootlocal));
680:     }
681:     PetscCall(PetscOptionsGetAll(NULL, &options));
682:     if (rootlocal && help) {
683:       PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
684:     } else if (help) {
685:       PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
686:     } else {
687:       PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
688:     }
689:     PetscCall(PetscFree(options));
690:     PetscCall(PetscGetVersion(version, sizeof(version)));
691:     PetscCall(PetscSNPrintf(intro, introlen,
692:                             "<body>\n"
693:                             "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
694:                             "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
695:                             "%s",
696:                             version, petscconfigureoptions, appline));
697:     PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
698:     PetscCall(PetscFree(intro));
699:     PetscCall(PetscFree(appline));
700:     if (selectport) {
701:       PetscBool silent;

703:       /* another process may have grabbed the port so keep trying */
704:       while (SAWs_Initialize()) {
705:         PetscCallSAWs(SAWs_Get_Available_Port, (&port));
706:         PetscCallSAWs(SAWs_Set_Port, (port));
707:       }

709:       PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
710:       if (!silent) {
711:         PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
712:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
713:       }
714:     } else {
715:       PetscCallSAWs(SAWs_Initialize, ());
716:     }
717:     PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
718:                                      "  Author = {Matt Otten and Jed Brown and Barry Smith},\n"
719:                                      "  Title  = {Scientific Application Web Server (SAWs) Users Manual},\n"
720:                                      "  Institution = {Argonne National Laboratory},\n"
721:                                      "  Year   = 2013\n}\n",
722:                                      NULL));
723:   }
724:   PetscFunctionReturn(PETSC_SUCCESS);
725: }
726: #endif

 728: /* Things that must be done before MPI_Init(), while MPI is not yet initialized; shared between the C and Fortran initialization paths */
729: PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
730: {
731:   PetscFunctionBegin;
732: #if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
733:   /* see MPI.py for details on this bug */
734:   (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
735: #endif
736:   PetscFunctionReturn(PETSC_SUCCESS);
737: }

739: #if PetscDefined(HAVE_ADIOS)
740:   #include <adios.h>
741:   #include <adios_read.h>
742: int64_t Petsc_adios_group;
743: #endif
744: #if PetscDefined(HAVE_OPENMP)
745:   #include <omp.h>
746: PetscInt PetscNumOMPThreads;
747: #endif

749: #include <petsc/private/deviceimpl.h>
750: #if PetscDefined(HAVE_CUDA)
751: #include <petscdevice_cuda.h>
752: // REMOVE ME
753: cudaStream_t PetscDefaultCudaStream = NULL;
754: #endif
755: #if PetscDefined(HAVE_HIP)
756: #include <petscdevice_hip.h>
757: // REMOVE ME
758: hipStream_t PetscDefaultHipStream = NULL;
759: #endif

761: #if PetscDefined(HAVE_DLFCN_H)
762:   #include <dlfcn.h>
763: #endif
764: PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
765: #if PetscDefined(HAVE_VIENNACL)
766: PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
767: PetscBool                   PetscViennaCLSynchronize = PETSC_FALSE;
768: #endif

770: PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;

772: /*
 773:   PetscInitialize_Common - shared code between C and Fortran initialization

775:   prog:     program name
776:   file:     optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
777:   help:     program help message
778:   ftn:      is it called from Fortran initialization (petscinitializef_)?
 779:   len:      length of the file string, used when ftn is true
780: */
781: PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscInt len)
782: {
783:   PetscMPIInt size;
784:   PetscBool   flg = PETSC_TRUE;
785:   char        hostname[256];
786:   PetscBool   blas_view_flag = PETSC_FALSE;

788:   PetscFunctionBegin;
789:   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
790:   /* these must be initialized in a routine, not as a constant declaration */
791:   PETSC_STDOUT = stdout;
792:   PETSC_STDERR = stderr;

794:   /* PetscCall can be used from now */
795:   PetscErrorHandlingInitialized = PETSC_TRUE;

797:   /*
798:       The checking over compatible runtime libraries is complicated by the MPI ABI initiative
799:       https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
800:         MPICH v3.1 (Released February 2014)
801:         IBM MPI v2.1 (December 2014)
802:         Intel MPI Library v5.0 (2014)
803:         Cray MPT v7.0.0 (June 2014)
 804:       As of July 31, 2017 the ABI number still appears to be 12; that is, all of the versions
805:       listed above and since that time are compatible.

807:       Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
808:       at compile time or runtime. Thus we will need to systematically track the allowed versions
809:       and how they are represented in the mpi.h and MPI_Get_library_version() output in order
810:       to perform the checking.

812:       Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).

814:       Questions:

816:         Should the checks for ABI incompatibility be only on the major version number below?
817:         Presumably the output to stderr will be removed before a release.
818:   */

820: #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
821:   {
822:     char        mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
823:     PetscMPIInt mpilibraryversionlength;

825:     PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
826:     /* check for MPICH versions before MPI ABI initiative */
827:   #if defined(MPICH_VERSION)
828:     #if MPICH_NUMVERSION < 30100000
829:     {
830:       char     *ver, *lf;
831:       PetscBool flg = PETSC_FALSE;

833:       PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
834:       if (ver) {
835:         PetscCall(PetscStrchr(ver, '\n', &lf));
836:         if (lf) {
837:           *lf = 0;
838:           PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
839:         }
840:       }
841:       if (!flg) {
842:         PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
843:         flg = PETSC_TRUE;
844:       }
845:     }
846:     #endif
847:       /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
848:   #elif defined(PETSC_HAVE_OPENMPI)
849:     {
850:       char     *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
851:       PetscBool flg                                              = PETSC_FALSE;
852:     #define PSTRSZ 2
853:       char      ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
854:       char      ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
855:       int       i;
856:       for (i = 0; i < PSTRSZ; i++) {
857:         PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
858:         if (ver) {
859:           PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
860:           PetscCall(PetscStrstr(ver, bs, &bsf));
861:           if (bsf) flg = PETSC_TRUE;
862:           break;
863:         }
864:       }
865:       if (!flg) {
866:         PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
867:         flg = PETSC_TRUE;
868:       }
869:     }
870:   #endif
871:   }
872: #endif

874: #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
875:   /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
876:   PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
877: #endif

879:   /* on Windows - set printf to default to printing 2 digit exponents */
880: #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT)
881:   _set_output_format(_TWO_DIGIT_EXPONENT);
882: #endif

884:   PetscCall(PetscOptionsCreateDefault());

886:   PetscFinalizeCalled = PETSC_FALSE;

888:   PetscCall(PetscSetProgramName(prog));
889:   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
890:   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
891:   PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
892:   PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));

894:   if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
895:   PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));

897:   if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
898:     PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
899:     PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
900:   }

902:   /* Done after init due to a bug in MPICH-GM? */
903:   PetscCall(PetscErrorPrintfInitialize());

905:   PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
906:   PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));

908:   MPIU_BOOL        = MPI_INT;
909:   MPIU_ENUM        = MPI_INT;
910:   MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
911:   if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
912:   else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
913: #if defined(PETSC_SIZEOF_LONG_LONG)
914:   else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
915: #endif
916:   else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");

918:   /*
 919:      Initialize the global complex variable; this is needed because with
920:      shared libraries the constructors for global variables
921:      are not called; at least on IRIX.
922:   */
923: #if defined(PETSC_HAVE_COMPLEX)
924:   {
925:   #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
926:     PetscComplex ic(0.0, 1.0);
927:     PETSC_i = ic;
928:   #else
929:     PETSC_i = _Complex_I;
930:   #endif
931:   }
932: #endif /* PETSC_HAVE_COMPLEX */

934:   /*
 935:      Create the PETSc MPI reduction operator that maxes the first
 936:      half of the entries and sums the second half.
937:   */
938:   PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));

940: #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
941:   PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
942:   PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
943:   #if defined(PETSC_HAVE_COMPLEX)
944:   PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
945:   PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
946:   #endif
947: #endif
948: #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
949:   PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
950:   PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
951: #endif

953: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
954:   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
955:   PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
956:   PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
957: #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
958:   PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
959: #endif

961:   PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
962:   PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
963:   PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));

965:   /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
966: #if !defined(PETSC_HAVE_MPIUNI)
967:   {
968:     PetscMPIInt  blockSizes[2]   = {1, 1};
969:     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
970:     MPI_Datatype blockTypes[2]   = {MPIU_REAL, MPIU_INT}, tmpStruct;

972:     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
973:     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
974:     PetscCallMPI(MPI_Type_free(&tmpStruct));
975:     PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
976:   }
977:   {
978:     PetscMPIInt  blockSizes[2]   = {1, 1};
979:     MPI_Aint     blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
980:     MPI_Datatype blockTypes[2]   = {MPIU_SCALAR, MPIU_INT}, tmpStruct;

982:     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
983:     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
984:     PetscCallMPI(MPI_Type_free(&tmpStruct));
985:     PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
986:   }
987: #endif

989: #if defined(PETSC_USE_64BIT_INDICES)
990:   PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
991:   PetscCallMPI(MPI_Type_commit(&MPIU_2INT));

993:   #if !defined(PETSC_HAVE_MPIUNI)
994:   {
995:     int          blockSizes[]   = {1, 1};
996:     MPI_Aint     blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
997:     MPI_Datatype blockTypes[]   = {MPIU_INT, MPI_INT}, tmpStruct;

999:     PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
1000:     PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
1001:     PetscCallMPI(MPI_Type_free(&tmpStruct));
1002:     PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
1003:   }
1004:   #endif
1005: #endif
1006:   PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
1007:   PetscCallMPI(MPI_Type_commit(&MPI_4INT));
1008:   PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
1009:   PetscCallMPI(MPI_Type_commit(&MPIU_4INT));

1011:   /*
1012:      Attributes to be set on PETSc communicators
1013:   */
1014:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
1015:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
1016:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
1017:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
1018:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
1019:   PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));

1021: #if defined(PETSC_USE_FORTRAN_BINDINGS)
1022:   if (ftn) PetscCall(PetscInitFortran_Private(file, len));
1023:   else
1024: #endif
1025:     PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));

1027:   if (PetscDefined(HAVE_MPIUNI)) {
1028:     const char *mpienv = getenv("PMI_SIZE");
1029:     if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
1030:     if (mpienv) {
1031:       PetscInt  isize;
1032:       PetscBool mflag = PETSC_FALSE;

1034:       PetscCall(PetscOptionsStringToInt(mpienv, &isize));
1035:       PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL));
1036:       PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs.");
1037:     }
1038:   }

1040:   /* call a second time so it can look in the options database */
1041:   PetscCall(PetscErrorPrintfInitialize());

1043:   /*
1044:      Check system options and print help
1045:   */
1046:   PetscCall(PetscOptionsCheckInitial_Private(help));

1048:   /*
1049:     Creates the logging data structures; this is enabled even if logging is not turned on
1050:     This is the last thing we do before returning to the user code to prevent having the
1051:     logging numbers contaminated by any startup time associated with MPI
1052:   */
1053:   PetscCall(PetscLogInitialize());

1055:   /*
1056:    Initialize PetscDevice and PetscDeviceContext

1058:    Note to any future devs thinking of moving this, proper initialization requires:
1059:    1. MPI initialized
1060:    2. Options DB initialized
1061:    3. PETSc error handling initialized, specifically signal handlers. This expects to set up
1062:       its own SIGSEV handler via the push/pop interface.
1063:    4. Logging initialized
1064:   */
1065:   PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));

1067: #if PetscDefined(HAVE_VIENNACL)
1068:   flg = PETSC_FALSE;
1069:   PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
1070:   if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
1071:   PetscViennaCLSynchronize = flg;
1072:   PetscCall(PetscViennaCLInit());
1073: #endif

1075:   PetscCall(PetscCitationsInitialize());

1077: #if defined(PETSC_HAVE_SAWS)
1078:   PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
1079:   flg = PETSC_FALSE;
1080:   PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
1081:   if (flg) PetscCall(PetscStackViewSAWs());
1082: #endif

1084:   /*
1085:      Load the dynamic libraries (on machines that support them), this registers all
1086:      the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
1087:   */
1088:   PetscCall(PetscInitialize_DynamicLibraries());

1090:   PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
1091:   PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
1092:   PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
1093:   PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
1094: #if defined(PETSC_HAVE_OPENMP)
1095:   {
1096:     PetscBool omp_view_flag;
1097:     char     *threads = getenv("OMP_NUM_THREADS");

1099:     if (threads) {
1100:       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
1101:       (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
1102:     } else {
1103:       PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
1104:       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
1105:     }
1106:     PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
1107:     PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environment variable OMP_NUM_THREADS)", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
1108:     PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
1109:     PetscOptionsEnd();
1110:     if (flg) {
1111:       PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
1112:       omp_set_num_threads((int)PetscNumOMPThreads);
1113:     }
1114:     if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
1115:   }
1116: #endif

1118:   PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
1119:   PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
1120: #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1121:   {
1122:     char *threads = NULL;

1124:     /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
1125:   #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
1126:     threads = getenv("BLIS_NUM_THREADS");
1127:     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
1128:     if (!threads) {
1129:       threads = getenv("OMP_NUM_THREADS");
1130:       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
1131:     }
1132:   #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
1133:     threads = getenv("MKL_NUM_THREADS");
1134:     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
1135:     if (!threads) {
1136:       threads = getenv("OMP_NUM_THREADS");
1137:       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
1138:     }
1139:   #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1140:     threads = getenv("OPENBLAS_NUM_THREADS");
1141:     if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
1142:     if (!threads) {
1143:       threads = getenv("OMP_NUM_THREADS");
1144:       if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
1145:     }
1146:   #endif
1147:     if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
1148:     PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
1149:     if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS threads %" PetscInt_FMT " given by -blas_num_threads\n", PetscNumBLASThreads));
1150:     if (flg || threads) {
1151:       PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
1152:       if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
1153:     }
1154:   }
1155: #elif defined(PETSC_HAVE_APPLE_ACCELERATE)
1156:   PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1157:   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1158: #else
1159:   if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
1160: #endif
1161:   PetscOptionsEnd();

1163: #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
1164:   /*
1165:       Tell MPI about our own data representation converter; this would/should be used if external32 is not supported by the MPI implementation.

1167:       Currently not used because it is not supported by MPICH.
1168:   */
1169:   if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
1170: #endif

1172: #if defined(PETSC_SERIALIZE_FUNCTIONS)
1173:   PetscCall(PetscFPTCreate(10000));
1174: #endif

1176: #if defined(PETSC_HAVE_HWLOC)
1177:   {
1178:     PetscViewer viewer;
1179:     PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
1180:     if (flg) {
1181:       PetscCall(PetscProcessPlacementView(viewer));
1182:       PetscCall(PetscViewerDestroy(&viewer));
1183:     }
1184:   }
1185: #endif

1187:   flg = PETSC_TRUE;
1188:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
1189:   if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));

1191: #if defined(PETSC_HAVE_ADIOS)
1192:   PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
1193:   PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
1194:   PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
1195:   PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
1196: #endif

1198: #if defined(__VALGRIND_H)
1199:   PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
1200:   #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
1201:   if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
1202:   #endif
1203: #endif
1204:   /*
1205:       Set flag that we are completely initialized
1206:   */
1207:   PetscInitializeCalled = PETSC_TRUE;

1209:   PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
1210:   if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));

1212:   PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1213:   if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
1214:   if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
1215:   else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
1216:   PetscFunctionReturn(PETSC_SUCCESS);
1217: }

1219: // "Unknown section 'Environmental Variables'"
1220: // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1221: /*@C
1222:   PetscInitialize - Initializes the PETSc database and MPI.
1223:   `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1224:   so this routine should always be called near the beginning of
1225:   your program -- usually the very first line!

1227:   Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set

1229:   Input Parameters:
1230: + argc - count of number of command line arguments
1231: . args - the command line arguments
1232: . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1233:           Use `NULL` or empty string to not check for code specific file.
1234:           Also checks `~/.petscrc`, `.petscrc` and `petscrc`.
1235:           Use `-skip_petscrc` in the code specific file (or command line) to skip `~/.petscrc`, `.petscrc` and `petscrc` files.
1236: - help - [optional] Help message to print, use `NULL` for no message

1238:    If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
1239:    communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`.
1240:    If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
1241:    if different subcommunicators of the job are doing different things with PETSc.
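
   For illustration only, one way to do this is sketched below, running PETSc on the even ranks
   of `MPI_COMM_WORLD`; the splitting criterion is arbitrary.
.vb
  MPI_Comm subcomm;
  int      rank;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_split(MPI_COMM_WORLD, rank % 2, 0, &subcomm);
  if (rank % 2 == 0) {
    PETSC_COMM_WORLD = subcomm;
    PetscCall(PetscInitialize(&argc, &argv, NULL, NULL));
    // ... PETSc code on the subcommunicator ...
    PetscCall(PetscFinalize());
  }
  MPI_Comm_free(&subcomm);
  MPI_Finalize();
.ve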

1243:   Options Database Keys:
1244: + -help [intro]                                       - prints help method for each option; if `intro` is given the program stops after printing the introductory help message
1245: . -start_in_debugger [noxterm,dbx,xdb,gdb,...]        - Starts program in debugger
1246: . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
1247: . -on_error_emacs <machinename>                       - causes `emacsclient` to jump to error file if an error is detected
1248: . -on_error_abort                                     - calls `abort()` when error detected (no traceback)
1249: . -on_error_mpiabort                                  - calls `MPI_abort()` when error detected
1250: . -error_output_stdout                                - prints PETSc error messages to `stdout` instead of the default `stderr`
1251: . -error_output_none                                  - does not print the error messages (but handles errors in the same way as if this was not called)
1252: . -debugger_ranks [rank1,rank2,...]                   - Indicates MPI ranks to start in debugger
1253: . -debugger_pause [sleeptime] (in seconds)            - Pauses debugger, use if it takes a long time for the debugger to start up on your system
1254: . -stop_for_debugger                                  - Print message on how to attach debugger manually to
1255:                                                         process and wait (`-debugger_pause`) seconds for attachment
1256: . -malloc_dump                                        - prints a list of all unfreed memory at the end of the run
1257: . -malloc_test                                        - like `-malloc_dump` and `-malloc_debug` combined, only active in a debugging build, ignored in an optimized build. Often set in the `PETSC_OPTIONS` environmental variable
1258: . -malloc_view                                        - show a list of all allocated memory during `PetscFinalize()`
1259: . -malloc_view_threshold <t>                          - only list memory allocations of size greater than t with `-malloc_view`
1260: . -malloc_requested_size                              - malloc logging will record the requested size rather than the (possibly larger) size after alignment
1261: . -fp_trap                                            - Stops on floating point exceptions
1262: . -no_signal_handler                                  - Indicates not to trap error signals
1263: . -shared_tmp                                         - indicates `/tmp` directory is known to be shared by all processors
1264: . -not_shared_tmp                                     - indicates each processor has its own `/tmp`
1265: . -tmp                                                - alternative directory to use instead of `/tmp`
1266: - -mpiuni-allow-multiprocess-launch                   - allow `mpiexec` to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni

1268:   Options Database Keys for Option Database:
1269: + -skip_petscrc           - skip the default option files `~/.petscrc`, `.petscrc`, `petscrc`
1270: . -options_monitor        - monitor all set options to standard output for the whole program run
1271: - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`

1273:    The options `-options_monitor` and `-options_monitor_cancel` are
1274:    position-independent and apply to all options set since PETSc started.
1275:    They can also be used in option files.

1277:    See `PetscOptionsMonitorSet()` to do monitoring programmatically.

1279:   Options Database Keys for Profiling:
1280:    See Users-Manual: ch_profiling for details.
1281: + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`.
1282: . -log_sync                                            - Enable barrier synchronization for all events. This option is useful for debugging imbalance within each event;
1283:                                                          however, it slows things down and gives a distorted view of the overall runtime.
1284: . -log_trace [filename]                                - Print traces of all PETSc calls to the screen (useful to determine where a program
1285:                                                          hangs without running in the debugger).  See `PetscLogTraceBegin()`.
1286: . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1287: . -log_view_memory                                     - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
1288: . -log_view_gpu_time                                   - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1289: . -log_exclude: <vec,mat,pc,ksp,snes>                  - excludes subset of object classes from logging
1290: . -log [filename]                                      - Logs profiling information in a dump file, see `PetscLogDump()`.
1291: . -log_all [filename]                                  - Same as `-log`.
1292: . -log_mpe [filename]                                  - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1293: . -log_perfstubs                                       - Starts a log handler with the perfstubs interface (which is used by TAU)
1294: . -log_nvtx                                            - Starts an nvtx log handler for use with Nsight
1295: . -viewfromoptions on,off                              - Enable or disable `XXXSetFromOptions()` calls; for applications with many small solves, turn this off
1296: . -get_total_flops                                     - Prints the total flops performed by all processes at the end of the run
1297: . -memory_view                                         - Print memory usage at end of run
1298: - -check_pointer_intensity 0,1,2                       - how intensively pointers are checked for validity (debug version only); using 0 results in faster code

1300:   Options Database Keys for SAWs:
1301: + -saws_port <portnumber>        - port number to publish SAWs data, default is 8080
1302: . -saws_port_auto_select         - have SAWs select a new unique port number where it publishes the data; the URL is printed to the screen.
1303:                                    This is useful when you are running many jobs that utilize SAWs at the same time
1304: . -saws_log <filename>           - save a log of all SAWs communication
1305: . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP
1306: - -saws_root <directory>         - allow SAWs to have access to the given directory to search for requested resources and files

1308:   Environmental Variables:
1309: +   `PETSC_TMP`                   - alternative directory to use instead of `/tmp`
1310: .   `PETSC_SHARED_TMP`            - `/tmp` is shared by all processes
1311: .   `PETSC_NOT_SHARED_TMP`        - each process has its own private `/tmp`
1312: .   `PETSC_OPTIONS`               - a string containing additional options for PETSc in the form of command line "-key value" pairs
1313: .   `PETSC_OPTIONS_YAML`          - (requires configuring PETSc to use libyaml with `--download-yaml`) a string containing additional options for PETSc in the form of a YAML document
1314: .   `PETSC_VIEWER_SOCKET_PORT`    - socket number to use for socket viewer
1315: -   `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to

1317:   Level: beginner

1319:   Note:
1320:   If for some reason you must call `MPI_Init()` separately from `PetscInitialize()`, call
1321:   it before `PetscInitialize()`.
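
  Example Usage:
  A minimal C main program, shown as a sketch (the help string and the printed message are illustrative):
.vb
       #include <petscsys.h>

       int main(int argc, char **argv)
       {
         PetscCall(PetscInitialize(&argc, &argv, NULL, "Minimal PETSc program\n"));
         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "PETSc is initialized\n"));
         PetscCall(PetscFinalize());
         return 0;
       }
.ve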

1323:   Fortran Notes:
1324:   In Fortran this routine can be called in any of the following forms
1325: .vb
1326:        call PetscInitialize(ierr)
1327:        call PetscInitialize(file,ierr)
1328:        call PetscInitialize(file,help,ierr)
1329: .ve

1331:   If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1332:   calling `PetscInitialize()`.

1334:   Options Database Key for Developers:
1335: . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1336:                        "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)

1338: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1339: @*/
1340: PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1341: {
1342:   PetscMPIInt flag;
1343:   const char *prog = "Unknown Name";

1345:   PetscFunctionBegin;
1346:   if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1347:   PetscCallMPI(MPI_Initialized(&flag));
1348:   if (!flag) {
1349:     PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1350:     PetscCall(PetscPreMPIInit_Private());
1351: #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1352:     {
1353:       PetscMPIInt provided;
1354:       PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1355:       PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1356:       if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1357:     }
1358: #else
1359:     PetscCallMPI(MPI_Init(argc, args));
1360: #endif
1361:     PetscBeganMPI = PETSC_TRUE;
1362:   }

1364:   if (argc && args && *argc) prog = **args;
1365:   if (argc && args) {
1366:     PetscGlobalArgc = *argc;
1367:     PetscGlobalArgs = *args;
1368:   }
1369:   PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, 0));
1370:   PetscFunctionReturn(PETSC_SUCCESS);
1371: }

1373: PETSC_INTERN PetscObject *PetscObjects;
1374: PETSC_INTERN PetscInt     PetscObjectsCounts;
1375: PETSC_INTERN PetscInt     PetscObjectsMaxCounts;
1376: PETSC_INTERN PetscBool    PetscObjectsLog;

1378: /*
1379:     Frees all the MPI types and operations that PETSc may have created
1380: */
1381: PetscErrorCode PetscFreeMPIResources(void)
1382: {
1383:   PetscFunctionBegin;
1384: #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
1385:   PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
1386:   #if defined(PETSC_HAVE_COMPLEX)
1387:   PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
1388:   #endif
1389: #endif
1390: #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
1391:   PetscCallMPI(MPI_Type_free(&MPIU___FP16));
1392: #endif

1394: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
1395:   PetscCallMPI(MPI_Op_free(&MPIU_SUM));
1396:   PetscCallMPI(MPI_Op_free(&MPIU_MAX));
1397:   PetscCallMPI(MPI_Op_free(&MPIU_MIN));
1398: #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
1399:   PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
1400: #endif

1402:   PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
1403:   PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
1404:   PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
1405: #if defined(PETSC_USE_64BIT_INDICES)
1406:   PetscCallMPI(MPI_Type_free(&MPIU_2INT));
1407:   PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
1408: #endif
1409:   PetscCallMPI(MPI_Type_free(&MPI_4INT));
1410:   PetscCallMPI(MPI_Type_free(&MPIU_4INT));
1411:   PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
1412:   PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
1413:   PetscFunctionReturn(PETSC_SUCCESS);
1414: }

1416: PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
1417: PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);

1419: /*@
1420:   PetscFinalize - Checks for options to be called at the conclusion of a PETSc program, and frees any remaining PETSc objects and data structures.
1421:   Automatically calls `MPI_Finalize()` if the user did not call `MPI_Init()` before calling `PetscInitialize()`.

1423:   Collective on `PETSC_COMM_WORLD`

1425:   Options Database Keys:
1426: + -options_view                    - Calls `PetscOptionsView()`
1427: . -options_left                    - Prints unused options that remain in the database
1428: . -objects_dump [all]              - Prints a list of objects allocated by the user that have not been freed; passing `all` causes all outstanding objects to be listed
1429: . -mpidump                         - Calls `PetscMPIDump()`
1430: . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1431: . -memory_view                     - Prints total memory usage
1432: - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions

1434:   Level: beginner

1436:   Note:
1437:   See `PetscInitialize()` for other runtime options.

1439:   You can call `PetscInitialize()` after `PetscFinalize()` but only with MPI-Uni or if you called `MPI_Init()` before ever calling `PetscInitialize()`.
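
  A sketch of that re-initialization pattern when the user manages MPI directly (error checking omitted for brevity):
.vb
       MPI_Init(&argc, &args);
       PetscInitialize(&argc, &args, NULL, NULL);
       /* ... first PETSc computation ... */
       PetscFinalize();
       PetscInitialize(&argc, &args, NULL, NULL);  /* allowed because PETSc did not start MPI */
       /* ... second PETSc computation ... */
       PetscFinalize();
       MPI_Finalize();
.ve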

1441: .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1442: @*/
1443: PetscErrorCode PetscFinalize(void)
1444: {
1445:   PetscMPIInt rank;
1446:   PetscInt    nopt;
1447:   PetscBool   flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
1448:   PetscBool   flg;
1449:   char        mname[PETSC_MAX_PATH_LEN];

1451:   PetscFunctionBegin;
1452:   PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
1453:   PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));

1455:   PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1456:   if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());

1458:   PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
1459:   PetscGlobalArgc = 0;
1460:   PetscGlobalArgs = NULL;

1462:   /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
1463:   {
1464:     union
1465:     {
1466:       MPI_Comm comm;
1467:       void    *ptr;
1468:     } ucomm;
1469:     PetscMPIInt flg;
1470:     void       *tmp;

1472:     PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
1473:     if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
1474:     if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
1475:     PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
1476:     if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
1477:     if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
1478:   }

1480:   PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
1481: #if defined(PETSC_HAVE_ADIOS)
1482:   PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
1483:   PetscCallExternal(adios_finalize, rank);
1484: #endif
1485:   PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
1486:   if (flg) {
1487:     char *cits, filename[PETSC_MAX_PATH_LEN];
1488:     FILE *fd = PETSC_STDOUT;

1490:     PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
1491:     if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
1492:     PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
1493:     cits[0] = 0;
1494:     PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
1495:     PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
1496:     PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
1497:     PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
1498:     PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
1499:     PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
1500:     PetscCall(PetscFree(cits));
1501:   }
1502:   PetscCall(PetscSegBufferDestroy(&PetscCitationsList));

1504: #if defined(PETSC_SERIALIZE_FUNCTIONS)
1505:   PetscCall(PetscFPTDestroy());
1506: #endif

1508: #if defined(PETSC_HAVE_X)
1509:   flg1 = PETSC_FALSE;
1510:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
1511:   if (flg1) {
1512:     /*  this is a crude hack, but better than nothing */
1513:     PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
1514:   }
1515: #endif

1517: #if !defined(PETSC_HAVE_THREADSAFETY)
1518:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
1519:   if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
1520: #endif

1522:   if (PetscDefined(USE_LOG)) {
1523:     flg1 = PETSC_FALSE;
1524:     PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
1525:     if (flg1) {
1526:       PetscLogDouble flops = 0;
1527:       PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
1528:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
1529:     }
1530:   }

1532:   if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
1533:     mname[0] = 0;
1534:     PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
1535:     if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
1536:   }

1538: #if defined(PETSC_HAVE_KOKKOS)
1539:   // Free PETSc/kokkos stuff before the potentially non-null PETSc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
1540:   if (PetscKokkosInitialized) {
1541:     PetscCall(PetscKokkosFinalize_Private());
1542:     PetscKokkosInitialized = PETSC_FALSE;
1543:   }
1544: #endif

1546:   // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
1547:   PetscCall(PetscObjectRegisterDestroyAll());

1549:   if (PetscDefined(USE_LOG)) {
1550:     PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
1551:     PetscCall(PetscLogViewFromOptions());
1552:     PetscCall(PetscOptionsPopCreateViewerOff());
1553:     //  It should be turned on with PetscLogGpuTime() and never turned off except in this place
1554:     PetscLogGpuTimeFlag = PETSC_FALSE;

1556:     // Free any objects created by the last block of code.
1557:     PetscCall(PetscObjectRegisterDestroyAll());

1559:     mname[0] = 0;
1560:     PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
1561:     PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
1562:     if (flg1 || flg2) PetscCall(PetscLogDump(mname));
1563:   }

1565:   flg1 = PETSC_FALSE;
1566:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
1567:   if (!flg1) PetscCall(PetscPopSignalHandler());
1568:   flg1 = PETSC_FALSE;
1569:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
1570:   if (flg1) PetscCall(PetscMPIDump(stdout));
1571:   flg1 = PETSC_FALSE;
1572:   flg2 = PETSC_FALSE;
1573:   /* preemptive call to avoid listing this option in options table as unused */
1574:   PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
1575:   PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
1576:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));

1578:   if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); }

1580:   /* to prevent PETSc -options_left from warning */
1581:   PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
1582:   PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));

1584:   flg3 = PETSC_FALSE; /* default value is required */
1585:   PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
1586:   if (!flg1) flg3 = PETSC_TRUE;
1587:   if (flg3) {
1588:     if (!flg2 && flg1) { /* have not yet printed the options */
1589:       PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
1590:     }
1591:     PetscCall(PetscOptionsAllUsed(NULL, &nopt));
1592:     if (nopt) {
1593:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
1594:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! This could be a spelling mistake, etc!\n"));
1595:       if (nopt == 1) {
1596:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
1597:       } else {
1598:         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
1599:       }
1600:     } else if (flg3 && flg1) {
1601:       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
1602:     }
1603:     PetscCall(PetscOptionsLeft(NULL));
1604:   }

1606: #if defined(PETSC_HAVE_SAWS)
1607:   if (!PetscGlobalRank) {
1608:     PetscCall(PetscStackSAWsViewOff());
1609:     PetscCallSAWs(SAWs_Finalize, ());
1610:   }
1611: #endif

1613:   /*
1614:        List all objects the user may have forgotten to free
1615:   */
1616:   if (PetscDefined(USE_LOG) && PetscObjectsLog) {
1617:     PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
1618:     if (flg1) {
1619:       MPI_Comm local_comm;
1620:       char     string[64];

1622:       PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
1623:       PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1624:       PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1625:       PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
1626:       PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1627:       PetscCallMPI(MPI_Comm_free(&local_comm));
1628:     }
1629:   }

1631:   PetscObjectsCounts    = 0;
1632:   PetscObjectsMaxCounts = 0;
1633:   PetscCall(PetscFree(PetscObjects));

1635:   /*
1636:      Destroy any packages that registered a finalize
1637:   */
1638:   PetscCall(PetscRegisterFinalizeAll());

1640:   PetscCall(PetscLogFinalize());

1642:   /*
1643:      Print PetscFunctionLists that have not been properly freed
1644:   */
1645:   if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());

1647:   if (petsc_history) {
1648:     PetscCall(PetscCloseHistoryFile(&petsc_history));
1649:     petsc_history = NULL;
1650:   }
1651:   PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
1652:   PetscCall(PetscInfoDestroy());

1654: #if !defined(PETSC_HAVE_THREADSAFETY)
1655:   if (!(PETSC_RUNNING_ON_VALGRIND)) {
1656:     char  fname[PETSC_MAX_PATH_LEN];
1657:     char  sname[PETSC_MAX_PATH_LEN];
1658:     FILE *fd;
1659:     int   err;

1661:     flg2 = PETSC_FALSE;
1662:     flg3 = PETSC_FALSE;
1663:     if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
1664:     PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
1665:     fname[0] = 0;
1666:     PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
1667:     if (flg1 && fname[0]) {
1668:       PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
1669:       fd = fopen(sname, "w");
1670:       PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
1671:       PetscCall(PetscMallocDump(fd));
1672:       err = fclose(fd);
1673:       PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
1674:     } else if (flg1 || flg2 || flg3) {
1675:       MPI_Comm local_comm;

1677:       PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1678:       PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1679:       PetscCall(PetscMallocDump(stdout));
1680:       PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1681:       PetscCallMPI(MPI_Comm_free(&local_comm));
1682:     }
1683:     fname[0] = 0;
1684:     PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
1685:     if (flg1 && fname[0]) {
1686:       PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
1687:       fd = fopen(sname, "w");
1688:       PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
1689:       PetscCall(PetscMallocView(fd));
1690:       err = fclose(fd);
1691:       PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
1692:     } else if (flg1) {
1693:       MPI_Comm local_comm;

1695:       PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1696:       PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1697:       PetscCall(PetscMallocView(stdout));
1698:       PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1699:       PetscCallMPI(MPI_Comm_free(&local_comm));
1700:     }
1701:   }
1702: #endif

1704:   /*
1705:      Close any open dynamic libraries
1706:   */
1707:   PetscCall(PetscFinalize_DynamicLibraries());

1709:   /* Can be destroyed only after all the options are used */
1710:   PetscCall(PetscOptionsDestroyDefault());

1712: #if defined(PETSC_HAVE_NVSHMEM)
1713:   if (PetscBeganNvshmem) {
1714:     PetscCall(PetscNvshmemFinalize());
1715:     PetscBeganNvshmem = PETSC_FALSE;
1716:   }
1717: #endif

1719:   PetscCall(PetscFreeMPIResources());

1721:   /*
1722:      Destroy any known inner MPI_Comm's and attributes pointing to them
1723:      Note this will not destroy any new communicators the user has created.

1725:      If all PETSc objects were not destroyed those left over objects will have hanging references to
1726:      the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
1727:  */
1728:   {
1729:     PetscCommCounter *counter;
1730:     PetscMPIInt       flg;
1731:     MPI_Comm          icomm;
1732:     union
1733:     {
1734:       MPI_Comm comm;
1735:       void    *ptr;
1736:     } ucomm;
1737:     PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
1738:     if (flg) {
1739:       icomm = ucomm.comm;
1740:       PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
1741:       PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

1743:       PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
1744:       PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
1745:       PetscCallMPI(MPI_Comm_free(&icomm));
1746:     }
1747:     PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
1748:     if (flg) {
1749:       icomm = ucomm.comm;
1750:       PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
1751:       PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");

1753:       PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
1754:       PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
1755:       PetscCallMPI(MPI_Comm_free(&icomm));
1756:     }
1757:   }

1759:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
1760:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
1761:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
1762:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
1763:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
1764:   PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));

1766:   // Free keyvals which may be silently created by some routines
1767:   if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
1768:   if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));

1770:   PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
1771:   PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
1772:   PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
1773:   PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));

1775:   if (PetscBeganMPI) {
1776:     PetscMPIInt flag;
1777:     PetscCallMPI(MPI_Finalized(&flag));
1778:     PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
1779:     /* wait until the very last moment to disable error handling */
1780:     PetscErrorHandlingInitialized = PETSC_FALSE;
1781:     PetscCallMPI(MPI_Finalize());
1782:   } else PetscErrorHandlingInitialized = PETSC_FALSE;

1784:   /*
1785:      Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
1786:    the communicator has some outstanding requests on it. Specifically if the
1787:    flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
1788:    src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
1789:    is never freed as it should be. Thus one may obtain messages of the form
1790:    [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
1791:    memory was not freed.

1793: */
1794:   PetscCall(PetscMallocClear());
1795:   PetscCall(PetscStackReset());

1797:   PetscInitializeCalled = PETSC_FALSE;
1798:   PetscFinalizeCalled   = PETSC_TRUE;
1799: #if defined(PETSC_USE_COVERAGE)
1800:   /*
1801:      flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
1802:      gcov files are still being added to the directories as git tries to remove the directories.
1803:    */
1804:   __gcov_flush();
1805: #endif
1806:   /* To match PetscFunctionBegin() at the beginning of this function */
1807:   PetscStackClearTop;
1808:   return PETSC_SUCCESS;
1809: }

1811: #if defined(PETSC_MISSING_LAPACK_lsame_)
1812: PETSC_EXTERN int lsame_(char *a, char *b)
1813: {
1814:   if (*a == *b) return 1;
1815:   if (*a + 32 == *b) return 1;
1816:   if (*a - 32 == *b) return 1;
1817:   return 0;
1818: }
1819: #endif

1821: #if defined(PETSC_MISSING_LAPACK_lsame)
1822: PETSC_EXTERN int lsame(char *a, char *b)
1823: {
1824:   if (*a == *b) return 1;
1825:   if (*a + 32 == *b) return 1;
1826:   if (*a - 32 == *b) return 1;
1827:   return 0;
1828: }
1829: #endif

1831: static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1832: {
1833:   PetscMPIInt err;
1834: #if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
1835:   PetscMPIInt count2;

1837:   PetscMPIIntCast_Internal(count, &count2);
1838:   err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
1839: #else
1840:   err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
1841: #endif
1842:   return err;
1843: }

1845: /*
1846:      When count is 1 and dtype == MPIU_INT, the reduction is performed in PetscInt64 to check for integer overflow
1847: */
1848: PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1849: {
1850:   PetscMPIInt err;
1851:   if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
1852:     PetscInt64 incnt, outcnt;
1853:     void      *inbufd, *outbufd;

1855:     if (inbuf != MPI_IN_PLACE) {
1856:       incnt   = *(PetscInt32 *)inbuf;
1857:       inbufd  = &incnt;
1858:       outbufd = &outcnt;
1859:       err     = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
1860:     } else {
1861:       outcnt  = *(PetscInt32 *)outbuf;
1862:       outbufd = &outcnt;
1863:       err     = MPIU_Allreduce_Count(MPI_IN_PLACE, outbufd, count, MPIU_INT64, op, comm);
1864:     }
1865:     if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
1866:     *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
1867:   } else {
1868:     err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
1869:   }
1870:   return err;
1871: }
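
/*
   A sketch of the caller-side pattern the overflow check above protects (illustrative, not a definitive usage):
   a global sum of local sizes performed through the MPIU_Allreduce() macro, which funnels into
   MPIU_Allreduce_Private(). With 32-bit PetscInt, a sum that exceeds PETSC_INT_MAX is reported as an MPI error
   instead of silently wrapping.

       PetscInt n = <local size>, N;
       PetscCallMPI(MPIU_Allreduce(&n, &N, 1, MPIU_INT, MPI_SUM, comm));
*/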

1873: /*@C
1874:   PetscCtxDestroyDefault - An implementation of a `PetscCtxDestroyFn` that uses `PetscFree()` to free the context

1876:   Input Parameter:
1877: . ctx - the context to be destroyed

1879:   Level: intermediate

1881:   Note:
1882:   This is not called directly, rather it is passed to `DMSetApplicationContextDestroy()`, `PetscContainerSetDestroy()`,
1883:   `PetscObjectContainerCreate()` and similar routines, and is then called by the destructor of the associated object.
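
  A sketch of typical use with a `DM` application context (`AppCtx` is an illustrative user-defined struct, not a PETSc type):
.vb
       AppCtx *user;

       PetscCall(PetscNew(&user));
       PetscCall(DMSetApplicationContext(dm, user));
       PetscCall(DMSetApplicationContextDestroy(dm, PetscCtxDestroyDefault));  /* user is freed when dm is destroyed */
.ve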

1885: .seealso: `PetscObject`, `PetscCtxDestroyFn`, `PetscObjectDestroy()`, `DMSetApplicationContextDestroy()`,  `PetscContainerSetDestroy()`,
1886:            `PetscObjectContainerCreate()`
1887: @*/
1888: PETSC_EXTERN PetscErrorCode PetscCtxDestroyDefault(void **ctx)
1889: {
1890:   PetscFunctionBegin;
1891:   PetscCall(PetscFree(*ctx));
1892:   PetscFunctionReturn(PETSC_SUCCESS);
1893: }