/* pinit.c */
1: #define PETSC_DESIRE_FEATURE_TEST_MACROS
2: /*
3: This file defines the initialization of PETSc, including PetscInitialize()
4: */
5: #include <petsc/private/petscimpl.h>
6: #include <petsc/private/logimpl.h>
7: #include <petscviewer.h>
8: #include <petsc/private/garbagecollector.h>
10: #if !defined(PETSC_HAVE_WINDOWS_COMPILERS)
11: #include <petsc/private/valgrind/valgrind.h>
12: #endif
14: #if defined(PETSC_USE_FORTRAN_BINDINGS)
15: #include <petsc/private/fortranimpl.h>
16: #endif
18: #if PetscDefined(USE_COVERAGE)
19: EXTERN_C_BEGIN
20: #if defined(PETSC_HAVE___GCOV_DUMP)
22: #endif
23: void __gcov_flush(void);
24: EXTERN_C_END
25: #endif
27: #if defined(PETSC_SERIALIZE_FUNCTIONS)
28: PETSC_INTERN PetscFPT PetscFPTData;
29: PetscFPT PetscFPTData = 0;
30: #endif
32: #if PetscDefined(HAVE_SAWS)
33: #include <petscviewersaws.h>
34: #endif
36: PETSC_INTERN FILE *petsc_history;
38: PETSC_INTERN PetscErrorCode PetscInitialize_DynamicLibraries(void);
39: PETSC_INTERN PetscErrorCode PetscFinalize_DynamicLibraries(void);
40: PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm, int);
41: PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm, int);
42: PETSC_INTERN PetscErrorCode PetscCloseHistoryFile(FILE **);
44: /* user may set these BEFORE calling PetscInitialize() */
45: MPI_Comm PETSC_COMM_WORLD = MPI_COMM_NULL;
46: #if PetscDefined(HAVE_MPI_INIT_THREAD)
47: PetscMPIInt PETSC_MPI_THREAD_REQUIRED = PETSC_DECIDE;
48: #else
49: PetscMPIInt PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_SINGLE;
50: #endif
52: PetscMPIInt Petsc_Counter_keyval = MPI_KEYVAL_INVALID;
53: PetscMPIInt Petsc_InnerComm_keyval = MPI_KEYVAL_INVALID;
54: PetscMPIInt Petsc_OuterComm_keyval = MPI_KEYVAL_INVALID;
55: PetscMPIInt Petsc_ShmComm_keyval = MPI_KEYVAL_INVALID;
56: PetscMPIInt Petsc_CreationIdx_keyval = MPI_KEYVAL_INVALID;
57: PetscMPIInt Petsc_Garbage_HMap_keyval = MPI_KEYVAL_INVALID;
59: PetscMPIInt Petsc_SharedWD_keyval = MPI_KEYVAL_INVALID;
60: PetscMPIInt Petsc_SharedTmp_keyval = MPI_KEYVAL_INVALID;
62: /*
63: Declare and set all the string names of the PETSc enums
64: */
65: const char *const PetscBools[] = {"FALSE", "TRUE", "PetscBool", "PETSC_", NULL};
66: const char *const PetscCopyModes[] = {"COPY_VALUES", "OWN_POINTER", "USE_POINTER", "PetscCopyMode", "PETSC_", NULL};
68: PetscBool PetscPreLoadingUsed = PETSC_FALSE;
69: PetscBool PetscPreLoadingOn = PETSC_FALSE;
71: PetscInt PetscHotRegionDepth;
73: PetscBool PETSC_RUNNING_ON_VALGRIND = PETSC_FALSE;
75: #if defined(PETSC_HAVE_THREADSAFETY)
76: PetscSpinlock PetscViewerASCIISpinLockOpen;
77: PetscSpinlock PetscViewerASCIISpinLockStdout;
78: PetscSpinlock PetscViewerASCIISpinLockStderr;
79: PetscSpinlock PetscCommSpinLock;
80: #endif
82: extern PetscInt PetscNumBLASThreads;
84: /*@C
85: PetscInitializeNoPointers - Calls PetscInitialize() from C/C++ without the pointers to argc and args
87: Collective, No Fortran Support
89: Input Parameters:
90: + argc - number of args
91: . args - array of command line arguments
92: . filename - optional name of the program file, pass `NULL` to ignore
93: - help - optional help, pass `NULL` to ignore
95: Level: advanced
97: Notes:
98: this is called only by the PETSc Julia interface. Even though it might start MPI it sets the flag to
99: indicate that it did NOT start MPI so that the `PetscFinalize()` does not end MPI, thus allowing `PetscInitialize()` to
100: be called multiple times from Julia without the problem of trying to initialize MPI more than once.
102: Developer Notes:
103: Turns off PETSc signal handling to allow Julia to manage signals
105: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`, `PetscInitializeNoArguments()`
106: */
107: PetscErrorCode PetscInitializeNoPointers(int argc, char **args, const char *filename, const char *help)
108: {
109: int myargc = argc;
110: char **myargs = args;
112: PetscFunctionBegin;
113: PetscCall(PetscInitialize(&myargc, &myargs, filename, help));
114: PetscCall(PetscPopSignalHandler());
115: PetscBeganMPI = PETSC_FALSE;
116: PetscFunctionReturn(PETSC_SUCCESS);
117: }
119: /*@C
120: PetscInitializeNoArguments - Calls `PetscInitialize()` from C/C++ without
121: the command line arguments.
123: Collective
125: Level: advanced
127: .seealso: `PetscInitialize()`, `PetscInitializeFortran()`
128: @*/
129: PetscErrorCode PetscInitializeNoArguments(void)
130: {
131: int argc = 0;
132: char **args = NULL;
134: PetscFunctionBegin;
135: PetscCall(PetscInitialize(&argc, &args, NULL, NULL));
136: PetscFunctionReturn(PETSC_SUCCESS);
137: }
139: /*@
140: PetscInitialized - Determine whether PETSc is initialized.
142: Output Parameter:
143: . isInitialized - `PETSC_TRUE` if PETSc is initialized, `PETSC_FALSE` otherwise
145: Level: beginner
147: .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
148: @*/
149: PetscErrorCode PetscInitialized(PetscBool *isInitialized)
150: {
151: PetscFunctionBegin;
152: if (PetscInitializeCalled) PetscAssertPointer(isInitialized, 1);
153: *isInitialized = PetscInitializeCalled;
154: PetscFunctionReturn(PETSC_SUCCESS);
155: }
157: /*@
158: PetscFinalized - Determine whether `PetscFinalize()` has been called yet
160: Output Parameter:
161: . isFinalized - `PETSC_TRUE` if PETSc is finalized, `PETSC_FALSE` otherwise
163: Level: developer
165: .seealso: `PetscInitialize()`, `PetscInitializeNoArguments()`, `PetscInitializeFortran()`
166: @*/
167: PetscErrorCode PetscFinalized(PetscBool *isFinalized)
168: {
169: PetscFunctionBegin;
170: if (!PetscFinalizeCalled) PetscAssertPointer(isFinalized, 1);
171: *isFinalized = PetscFinalizeCalled;
172: PetscFunctionReturn(PETSC_SUCCESS);
173: }
175: PETSC_INTERN PetscErrorCode PetscOptionsCheckInitial_Private(const char[]);
177: /*
178: This function is the MPI reduction operation used to compute the sum of the
179: first half of the datatype and the max of the second half.
180: */
181: MPI_Op MPIU_MAXSUM_OP = 0;
182: MPI_Op Petsc_Garbage_SetIntersectOp = 0;
/*
  MPIU_MaxSum_Local - user-defined MPI reduction: for each pair of entries, keeps the
  maximum of the first member and the sum of the second member.

  Called by MPI (via MPIU_MAXSUM_OP), never directly by users.  Handles the MPIU_2INT
  and MPIU_INT_MPIINT layouts; an MPI_Op cannot return an error code, so any other
  datatype aborts.
*/
PETSC_INTERN void MPIAPI MPIU_MaxSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscFunctionBegin;
  if (*datatype == MPIU_INT_MPIINT && PetscDefined(USE_64BIT_INDICES)) {
    /* mixed-width struct layout; this branch only exists in 64-bit-index builds */
#if defined(PETSC_USE_64BIT_INDICES)
    struct petsc_mpiu_int_mpiint *xin = (struct petsc_mpiu_int_mpiint *)in, *xout = (struct petsc_mpiu_int_mpiint *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[i].a = PetscMax(xout[i].a, xin[i].a);
      xout[i].b += xin[i].b;
    }
#endif
  } else if (*datatype == MPIU_2INT || *datatype == MPIU_INT_MPIINT) {
    /* pairs of PetscInt: even slots carry the running max, odd slots the running sum
       (MPIU_INT_MPIINT lands here in non-64-bit-index builds — see the condition above) */
    PetscInt *xin = (PetscInt *)in, *xout = (PetscInt *)out;
    PetscMPIInt count = *cnt;

    for (PetscMPIInt i = 0; i < count; i++) {
      xout[2 * i] = PetscMax(xout[2 * i], xin[2 * i]);
      xout[2 * i + 1] += xin[2 * i + 1];
    }
  } else {
    /* no way to propagate an error out of an MPI_Op: print and abort */
    PetscErrorCode ierr = (*PetscErrorPrintf)("Can only handle MPIU_2INT and MPIU_INT_MPIINT data types");
    (void)ierr;
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
213: /*@
214: PetscMaxSum - Returns the max of the first entry over all MPI processes and the sum of the second entry.
216: Collective
218: Input Parameters:
219: + comm - the communicator
220: - array - an arry of length 2 times `size`, the number of MPI processes
222: Output Parameters:
223: + max - the maximum of `array[2*rank]` over all MPI processes
224: - sum - the sum of the `array[2*rank + 1]` over all MPI processes
226: Level: developer
228: .seealso: `PetscInitialize()`
229: @*/
PetscErrorCode PetscMaxSum(MPI_Comm comm, const PetscInt array[], PetscInt *max, PetscInt *sum)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
  {
    /* reduce-scatter delivers exactly this rank's reduced pair; no O(size) buffer needed */
    struct {
      PetscInt max, sum;
    } work;
    PetscCallMPI(MPI_Reduce_scatter_block((void *)array, &work, 1, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work.max;
    *sum = work.sum;
  }
#else
  {
    /* fallback: reduce all pairs on every rank, then pick out this rank's entry */
    PetscMPIInt size, rank;
    struct {
      PetscInt max, sum;
    } *work;
    PetscCallMPI(MPI_Comm_size(comm, &size));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscMalloc1(size, &work));
    PetscCallMPI(MPIU_Allreduce((void *)array, work, size, MPIU_2INT, MPIU_MAXSUM_OP, comm));
    *max = work[rank].max;
    *sum = work[rank].sum;
    PetscCall(PetscFree(work));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
260: #if (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
261: #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
262: #include <quadmath.h>
263: #endif
264: MPI_Op MPIU_SUM___FP16___FLOAT128 = 0;
265: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
266: MPI_Op MPIU_SUM = 0;
267: #endif
/*
  PetscSum_Local - user-defined MPI reduction computing an entrywise sum for the real,
  complex, __float128, __complex128 and __fp16 datatypes PETSc may need to sum itself.

  Called by MPI, never directly by users.  An MPI_Op cannot return an error code, so
  unsupported datatypes abort.  NOTE: the brace structure below is interleaved with
  preprocessor conditionals — the `}` before the __COMPLEX128 `else if` closes the
  __FLOAT128 branch; edit with care.
*/
PETSC_EXTERN void MPIAPI PetscSum_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
{
  PetscMPIInt i, count = *cnt;

  PetscFunctionBegin;
  if (*datatype == MPIU_REAL) {
    PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#if defined(PETSC_HAVE_COMPLEX)
  else if (*datatype == MPIU_COMPLEX) {
    PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
  }
#endif
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  else if (*datatype == MPIU___FLOAT128) {
    __float128 *xin = (__float128 *)in, *xout = (__float128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#if defined(PETSC_HAVE_COMPLEX)
  } else if (*datatype == MPIU___COMPLEX128) {
    __complex128 *xin = (__complex128 *)in, *xout = (__complex128 *)out;
    for (i = 0; i < count; i++) xout[i] += xin[i];
#endif
  }
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  else if (*datatype == MPIU___FP16) {
    __fp16 *xin = (__fp16 *)in, *xout = (__fp16 *)out;
    /* explicit cast: __fp16 arithmetic promotes, so narrow the result back */
    for (i = 0; i < count; i++) xout[i] = (__fp16)(xin[i] + xout[i]);
  }
#endif
  else {
    /* the message lists exactly the datatypes compiled into this build */
#if (!defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)) && (!defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16))
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
#elif !defined(PETSC_HAVE_REAL___FP16) || defined(PETSC_SKIP_REAL___FP16)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, or MPIU___COMPLEX128 data types"));
#elif !defined(PETSC_HAVE_REAL___FLOAT128) || defined(PETSC_SKIP_REAL___FLOAT128)
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, or MPIU___FP16 data types"));
#else
    PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL, MPIU_COMPLEX, MPIU___FLOAT128, MPIU___COMPLEX128, or MPIU___FP16 data types"));
#endif
    PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
  }
  PetscFunctionReturnVoid();
}
315: #endif
317: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
318: MPI_Op MPIU_MAX = 0;
319: MPI_Op MPIU_MIN = 0;
321: PETSC_EXTERN void MPIAPI PetscMax_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
322: {
323: PetscInt i, count = *cnt;
325: PetscFunctionBegin;
326: if (*datatype == MPIU_REAL) {
327: PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
328: for (i = 0; i < count; i++) xout[i] = PetscMax(xout[i], xin[i]);
329: }
330: #if defined(PETSC_HAVE_COMPLEX)
331: else if (*datatype == MPIU_COMPLEX) {
332: PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
333: for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) < PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
334: }
335: #endif
336: else {
337: PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_COMPLEX data types"));
338: PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
339: }
340: PetscFunctionReturnVoid();
341: }
343: PETSC_EXTERN void MPIAPI PetscMin_Local(void *in, void *out, PetscMPIInt *cnt, MPI_Datatype *datatype)
344: {
345: PetscInt i, count = *cnt;
347: PetscFunctionBegin;
348: if (*datatype == MPIU_REAL) {
349: PetscReal *xin = (PetscReal *)in, *xout = (PetscReal *)out;
350: for (i = 0; i < count; i++) xout[i] = PetscMin(xout[i], xin[i]);
351: }
352: #if defined(PETSC_HAVE_COMPLEX)
353: else if (*datatype == MPIU_COMPLEX) {
354: PetscComplex *xin = (PetscComplex *)in, *xout = (PetscComplex *)out;
355: for (i = 0; i < count; i++) xout[i] = PetscRealPartComplex(xout[i]) > PetscRealPartComplex(xin[i]) ? xin[i] : xout[i];
356: }
357: #endif
358: else {
359: PetscCallAbort(MPI_COMM_SELF, (*PetscErrorPrintf)("Can only handle MPIU_REAL or MPIU_SCALAR data (i.e. double or complex) types"));
360: PETSCABORT(MPI_COMM_SELF, PETSC_ERR_ARG_WRONG);
361: }
362: PetscFunctionReturnVoid();
363: }
364: #endif
366: /*
367: Private routine to delete internal tag/name counter storage when a communicator is freed.
369: This is called by MPI, not by users. This is called by MPI_Comm_free() when the communicator that has this data as an attribute is freed.
371: Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
373: */
374: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_Counter_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *count_val, void *extra_state)
375: {
376: PetscCommCounter *counter = (PetscCommCounter *)count_val;
377: struct PetscCommStash *comms = counter->comms, *pcomm;
379: PetscFunctionBegin;
380: PetscCallReturnMPI(PetscInfo(NULL, "Deleting counter data in an MPI_Comm %ld\n", (long)comm));
381: PetscCallReturnMPI(PetscFree(counter->iflags));
382: while (comms) {
383: PetscCallMPIReturnMPI(MPI_Comm_free(&comms->comm));
384: pcomm = comms;
385: comms = comms->next;
386: PetscCallReturnMPI(PetscFree(pcomm));
387: }
388: PetscCallReturnMPI(PetscFree(counter));
389: PetscFunctionReturn(MPI_SUCCESS);
390: }
392: /*
393: This is invoked on the outer comm as a result of either PetscCommDestroy() (via MPI_Comm_delete_attr) or when the user
394: calls MPI_Comm_free().
396: This is the only entry point for breaking the links between inner and outer comms.
398: This is called by MPI, not by users. This is called when MPI_Comm_free() is called on the communicator.
400: Note: this is declared extern "C" because it is passed to MPI_Comm_create_keyval()
402: */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_InnerComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  /* MPI attributes are void*; a union gives a well-defined pun between void* and MPI_Comm */
  union
  {
    MPI_Comm comm;
    void *ptr;
  } icomm;

  PetscFunctionBegin;
  PetscCheckReturnMPI(keyval == Petsc_InnerComm_keyval, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Unexpected keyval");
  icomm.ptr = attr_val; /* the attribute value is the inner PETSc communicator */
  if (PetscDefined(USE_DEBUG)) {
    /* Error out if the inner/outer comms are not correctly linked through their Outer/InnterComm attributes */
    PetscMPIInt flg;
    union
    {
      MPI_Comm comm;
      void *ptr;
    } ocomm;
    PetscCallMPIReturnMPI(MPI_Comm_get_attr(icomm.comm, Petsc_OuterComm_keyval, &ocomm, &flg));
    PetscCheckReturnMPI(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm does not have OuterComm attribute");
    PetscCheckReturnMPI(ocomm.comm == comm, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner comm's OuterComm attribute does not point to outer PETSc comm");
  }
  /* deleting the attribute fires Petsc_OuterComm_Attr_DeleteFn on the inner comm, completing the unlink */
  PetscCallMPIReturnMPI(MPI_Comm_delete_attr(icomm.comm, Petsc_OuterComm_keyval));
  PetscCallReturnMPI(PetscInfo(NULL, "User MPI_Comm %ld is being unlinked from inner PETSc comm %ld\n", (long)comm, (long)icomm.comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
431: /*
432: * This is invoked on the inner comm when Petsc_InnerComm_Attr_DeleteFn calls MPI_Comm_delete_attr(). It should not be reached any other way.
433: */
PETSC_EXTERN PetscMPIInt MPIAPI Petsc_OuterComm_Attr_DeleteFn(MPI_Comm comm, PetscMPIInt keyval, void *attr_val, void *extra_state)
{
  PetscFunctionBegin;
  /* only logs: the actual unlinking is performed by Petsc_InnerComm_Attr_DeleteFn,
     which is the sole caller path (via MPI_Comm_delete_attr, see comment above) */
  PetscCallReturnMPI(PetscInfo(NULL, "Removing reference to PETSc communicator embedded in a user MPI_Comm %ld\n", (long)comm));
  PetscFunctionReturn(MPI_SUCCESS);
}
441: PETSC_EXTERN PetscMPIInt MPIAPI Petsc_ShmComm_Attr_DeleteFn(MPI_Comm, PetscMPIInt, void *, void *);
443: #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
444: PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype, MPI_Aint *, void *);
445: PETSC_EXTERN PetscMPIInt PetscDataRep_read_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
446: PETSC_EXTERN PetscMPIInt PetscDataRep_write_conv_fn(void *, MPI_Datatype, PetscMPIInt, void *, MPI_Offset, void *);
447: #endif
449: PetscMPIInt PETSC_MPI_ERROR_CLASS = MPI_ERR_LASTCODE, PETSC_MPI_ERROR_CODE;
451: PETSC_INTERN int PetscGlobalArgc;
452: PETSC_INTERN char **PetscGlobalArgs, **PetscGlobalArgsFortran;
453: int PetscGlobalArgc = 0;
454: char **PetscGlobalArgs = NULL;
455: char **PetscGlobalArgsFortran = NULL;
456: PetscSegBuffer PetscCitationsList;
/*
  PetscCitationsInitialize - creates the global citation buffer and seeds it with the
  two standard PETSc references; later entries are added with PetscCitationsRegister().
*/
PetscErrorCode PetscCitationsInitialize(void)
{
  PetscFunctionBegin;
  PetscCall(PetscSegBufferCreate(1, 10000, &PetscCitationsList));

  /* PETSc/TAO users manual */
  PetscCall(PetscCitationsRegister("@TechReport{petsc-user-ref,\n\
  Author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Steven Benson and Jed Brown\n\
    and Peter Brune and Kris Buschelman and Emil Constantinescu and Lisandro Dalcin and Alp Dener\n\
    and Victor Eijkhout and Jacob Faibussowitsch and William~D. Gropp and V\'{a}clav Hapla and Tobin Isaac and Pierre Jolivet\n\
    and Dmitry Karpeev and Dinesh Kaushik and Matthew~G. Knepley and Fande Kong and Scott Kruger\n\
    and Dave~A. May and Lois Curfman McInnes and Richard Tran Mills and Lawrence Mitchell and Todd Munson\n\
    and Jose~E. Roman and Karl Rupp and Patrick Sanan and Jason Sarich and Barry~F. Smith and Hansol Suh\n\
    and Stefano Zampini and Hong Zhang and Hong Zhang and Junchao Zhang},\n\
  Title = {{PETSc/TAO} Users Manual},\n\
  Number = {ANL-21/39 - Revision 3.22},\n\
  Doi = {10.2172/2205494},\n\
  Institution = {Argonne National Laboratory},\n\
  Year = {2024}\n}\n",
                                  NULL));

  /* original paper on the PETSc design */
  PetscCall(PetscCitationsRegister("@InProceedings{petsc-efficient,\n\
  Author = {Satish Balay and William D. Gropp and Lois Curfman McInnes and Barry F. Smith},\n\
  Title = {Efficient Management of Parallelism in Object Oriented Numerical Software Libraries},\n\
  Booktitle = {Modern Software Tools in Scientific Computing},\n\
  Editor = {E. Arge and A. M. Bruaset and H. P. Langtangen},\n\
  Pages = {163--202},\n\
  Publisher = {Birkh{\\\"{a}}user Press},\n\
  Year = {1997}\n}\n",
                                  NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}
490: static char programname[PETSC_MAX_PATH_LEN] = ""; /* HP includes entire path in name */
/*
  PetscSetProgramName - records the name of the running program in the file-static
  `programname` buffer (retrieved later via PetscGetProgramName()).

  The copy is bounded by sizeof(programname) == PETSC_MAX_PATH_LEN; longer names are
  presumably truncated by PetscStrncpy() — confirm against its man page.
*/
PetscErrorCode PetscSetProgramName(const char name[])
{
  PetscFunctionBegin;
  PetscCall(PetscStrncpy(programname, name, sizeof(programname)));
  PetscFunctionReturn(PETSC_SUCCESS);
}
499: /*@C
500: PetscGetProgramName - Gets the name of the running program.
502: Not Collective
504: Input Parameter:
505: . len - length of the string name
507: Output Parameter:
508: . name - the name of the running program, provide a string of length `PETSC_MAX_PATH_LEN`
510: Level: advanced
512: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
513: @*/
PetscErrorCode PetscGetProgramName(char name[], size_t len)
{
  PetscFunctionBegin;
  /* copies at most len characters of the name stored by PetscSetProgramName() */
  PetscCall(PetscStrncpy(name, programname, len));
  PetscFunctionReturn(PETSC_SUCCESS);
}
521: /*@C
522: PetscGetArgs - Allows you to access the raw command line arguments anywhere
523: after PetscInitialize() is called but before `PetscFinalize()`.
525: Not Collective, No Fortran Support
527: Output Parameters:
528: + argc - count of number of command line arguments
529: - args - the command line arguments
531: Level: intermediate
533: Notes:
534: This is usually used to pass the command line arguments into other libraries
535: that are called internally deep in PETSc or the application.
537: The first argument contains the program name as is normal for C programs.
539: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArguments()`, `PetscInitialize()`
540: @*/
PetscErrorCode PetscGetArgs(int *argc, char ***args)
{
  PetscFunctionBegin;
  /* NOTE(review): with `||` this check only fails in the post-PetscFinalize() state
     (both flags would then make the condition false); before PetscInitialize() it
     passes and returns the zero/NULL defaults — confirm this is intended */
  PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
  *argc = PetscGlobalArgc;
  *args = PetscGlobalArgs;
  PetscFunctionReturn(PETSC_SUCCESS);
}
550: /*@C
551: PetscGetArguments - Allows you to access the command line arguments anywhere
552: after `PetscInitialize()` is called but before `PetscFinalize()`.
554: Not Collective, No Fortran Support
556: Output Parameter:
557: . args - the command line arguments
559: Level: intermediate
561: Note:
562: This does NOT start with the program name and IS `NULL` terminated (final arg is void)
564: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscFreeArguments()`, `PetscInitialize()`
565: @*/
566: PetscErrorCode PetscGetArguments(char ***args)
567: {
568: PetscInt i, argc = PetscGlobalArgc;
570: PetscFunctionBegin;
571: PetscCheck(PetscInitializeCalled || !PetscFinalizeCalled, PETSC_COMM_SELF, PETSC_ERR_ORDER, "You must call after PetscInitialize() but before PetscFinalize()");
572: if (!argc) {
573: *args = NULL;
574: PetscFunctionReturn(PETSC_SUCCESS);
575: }
576: PetscCall(PetscMalloc1(argc, args));
577: for (i = 0; i < argc - 1; i++) PetscCall(PetscStrallocpy(PetscGlobalArgs[i + 1], &(*args)[i]));
578: (*args)[argc - 1] = NULL;
579: PetscFunctionReturn(PETSC_SUCCESS);
580: }
582: /*@C
583: PetscFreeArguments - Frees the memory obtained with `PetscGetArguments()`
585: Not Collective, No Fortran Support
587: Output Parameter:
588: . args - the command line arguments
590: Level: intermediate
592: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscGetArguments()`
593: @*/
594: PetscErrorCode PetscFreeArguments(char **args)
595: {
596: PetscFunctionBegin;
597: if (args) {
598: PetscInt i = 0;
600: while (args[i]) PetscCall(PetscFree(args[i++]));
601: PetscCall(PetscFree(args));
602: }
603: PetscFunctionReturn(PETSC_SUCCESS);
604: }
606: #if PetscDefined(HAVE_SAWS)
607: #include <petscconfiginfo.h>
/*
  PetscInitializeSAWs - configures and starts the SAWs web server from the -saws_*
  command-line options and publishes an introductory HTML page for the application.
*/
PETSC_INTERN PetscErrorCode PetscInitializeSAWs(const char help[])
{
  PetscFunctionBegin;
  /* only rank 0 runs the SAWs server */
  if (!PetscGlobalRank) {
    char      cert[PETSC_MAX_PATH_LEN], root[PETSC_MAX_PATH_LEN], *intro, programname[64], *appline, *options, version[64];
    int       port;
    PetscBool flg, rootlocal = PETSC_FALSE, flg2, selectport = PETSC_FALSE;
    size_t    applinelen, introlen;
    char      sawsurl[256];

    /* optional log file for SAWs itself; an empty value means "log with no file name" */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_log", &flg));
    if (flg) {
      char sawslog[PETSC_MAX_PATH_LEN];

      PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_log", sawslog, sizeof(sawslog), NULL));
      if (sawslog[0]) {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (sawslog));
      } else {
        PetscCallSAWs(SAWs_Set_Use_Logfile, (NULL));
      }
    }
    /* HTTPS certificate and port selection (explicit -saws_port or auto-selected) */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_https", cert, sizeof(cert), &flg));
    if (flg) PetscCallSAWs(SAWs_Set_Use_HTTPS, (cert));
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select", &selectport, NULL));
    if (selectport) {
      PetscCallSAWs(SAWs_Get_Available_Port, (&port));
      PetscCallSAWs(SAWs_Set_Port, (port));
    } else {
      PetscCall(PetscOptionsGetInt(NULL, NULL, "-saws_port", &port, &flg));
      if (flg) PetscCallSAWs(SAWs_Set_Port, (port));
    }
    /* document root: -saws_root (rootlocal records whether it is "."), otherwise the
       installed share/petsc/saws directory when -saws_options is given */
    PetscCall(PetscOptionsGetString(NULL, NULL, "-saws_root", root, sizeof(root), &flg));
    if (flg) {
      PetscCallSAWs(SAWs_Set_Document_Root, (root));
      PetscCall(PetscStrcmp(root, ".", &rootlocal));
    } else {
      PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_options", &flg));
      if (flg) {
        PetscCall(PetscStrreplace(PETSC_COMM_WORLD, "${PETSC_DIR}/share/petsc/saws", root, sizeof(root)));
        PetscCallSAWs(SAWs_Set_Document_Root, (root));
      }
    }
    /* -saws_local requires -saws_root and a readable js/ directory under it */
    PetscCall(PetscOptionsHasName(NULL, NULL, "-saws_local", &flg2));
    if (flg2) {
      char jsdir[PETSC_MAX_PATH_LEN];
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_SUP, "-saws_local option requires -saws_root option");
      PetscCall(PetscSNPrintf(jsdir, sizeof(jsdir), "%s/js", root));
      PetscCall(PetscTestDirectory(jsdir, 'r', &flg));
      PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_FILE_READ, "-saws_local option requires js directory in root directory");
      PetscCallSAWs(SAWs_Push_Local_Header, ());
    }
    /* build the introductory page: program name, options used, and the help string */
    PetscCall(PetscGetProgramName(programname, sizeof(programname)));
    PetscCall(PetscStrlen(help, &applinelen));
    introlen = 4096 + applinelen;
    applinelen += 1024;
    PetscCall(PetscMalloc(applinelen, &appline));
    PetscCall(PetscMalloc(introlen, &intro));

    if (rootlocal) {
      /* only link to the source listing when <programname>.c.html actually exists */
      PetscCall(PetscSNPrintf(appline, applinelen, "%s.c.html", programname));
      PetscCall(PetscTestFile(appline, 'r', &rootlocal));
    }
    PetscCall(PetscOptionsGetAll(NULL, &options));
    if (rootlocal && help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running <a href=\"%s.c.html\">%s</a> %s</center><br><center><pre>%s</pre></center><br>\n", programname, programname, options, help));
    } else if (help) {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center>Running %s %s</center><br><center><pre>%s</pre></center><br>", programname, options, help));
    } else {
      PetscCall(PetscSNPrintf(appline, applinelen, "<center> Running %s %s</center><br>\n", programname, options));
    }
    PetscCall(PetscFree(options));
    PetscCall(PetscGetVersion(version, sizeof(version)));
    PetscCall(PetscSNPrintf(intro, introlen,
                            "<body>\n"
                            "<center><h2> <a href=\"https://petsc.org/\">PETSc</a> Application Web server powered by <a href=\"https://bitbucket.org/saws/saws\">SAWs</a> </h2></center>\n"
                            "<center>This is the default PETSc application dashboard, from it you can access any published PETSc objects or logging data</center><br><center>%s configured with %s</center><br>\n"
                            "%s",
                            version, petscconfigureoptions, appline));
    PetscCallSAWs(SAWs_Push_Body, ("index.html", 0, intro));
    PetscCall(PetscFree(intro));
    PetscCall(PetscFree(appline));
    if (selectport) {
      PetscBool silent;

      /* another process may have grabbed the port so keep trying */
      while (SAWs_Initialize()) {
        PetscCallSAWs(SAWs_Get_Available_Port, (&port));
        PetscCallSAWs(SAWs_Set_Port, (port));
      }

      PetscCall(PetscOptionsGetBool(NULL, NULL, "-saws_port_auto_select_silent", &silent, NULL));
      if (!silent) {
        PetscCallSAWs(SAWs_Get_FullURL, (sizeof(sawsurl), sawsurl));
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Point your browser to %s for SAWs\n", sawsurl));
      }
    } else {
      PetscCallSAWs(SAWs_Initialize, ());
    }
    PetscCall(PetscCitationsRegister("@TechReport{ saws,\n"
                                     " Author = {Matt Otten and Jed Brown and Barry Smith},\n"
                                     " Title = {Scientific Application Web Server (SAWs) Users Manual},\n"
                                     " Institution = {Argonne National Laboratory},\n"
                                     " Year = 2013\n}\n",
                                     NULL));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
716: #endif
718: /* Things must be done before MPI_Init() when MPI is not yet initialized, and can be shared between C init and Fortran init */
PETSC_INTERN PetscErrorCode PetscPreMPIInit_Private(void)
{
  PetscFunctionBegin;
#if defined(PETSC_HAVE_HWLOC_SOLARIS_BUG)
  /* see MPI.py for details on this bug */
  /* disable hwloc's x86 backend; best-effort, so the setenv() result is ignored */
  (void)setenv("HWLOC_COMPONENTS", "-x86", 1);
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
729: #if PetscDefined(HAVE_ADIOS)
730: #include <adios.h>
731: #include <adios_read.h>
732: int64_t Petsc_adios_group;
733: #endif
734: #if PetscDefined(HAVE_OPENMP)
735: #include <omp.h>
736: PetscInt PetscNumOMPThreads;
737: #endif
739: #include <petsc/private/deviceimpl.h>
740: #if PetscDefined(HAVE_CUDA)
741: #include <petscdevice_cuda.h>
742: // REMOVE ME
743: cudaStream_t PetscDefaultCudaStream = NULL;
744: #endif
745: #if PetscDefined(HAVE_HIP)
746: #include <petscdevice_hip.h>
747: // REMOVE ME
748: hipStream_t PetscDefaultHipStream = NULL;
749: #endif
751: #if PetscDefined(HAVE_DLFCN_H)
752: #include <dlfcn.h>
753: #endif
754: PETSC_INTERN PetscErrorCode PetscLogInitialize(void);
755: #if PetscDefined(HAVE_VIENNACL)
756: PETSC_EXTERN PetscErrorCode PetscViennaCLInit(void);
757: PetscBool PetscViennaCLSynchronize = PETSC_FALSE;
758: #endif
760: PetscBool PetscCIEnabled = PETSC_FALSE, PetscCIEnabledPortableErrorOutput = PETSC_FALSE;
762: /*
763: PetscInitialize_Common - shared code between C and Fortran initialization
765: prog: program name
766: file: optional PETSc database file name. Might be in Fortran string format when 'ftn' is true
767: help: program help message
768: ftn: is it called from Fortran initialization (petscinitializef_)?
769: readarguments,len: used when fortran is true
770: */
771: PETSC_INTERN PetscErrorCode PetscInitialize_Common(const char *prog, const char *file, const char *help, PetscBool ftn, PetscBool readarguments, PetscInt len)
772: {
773: PetscMPIInt size;
774: PetscBool flg = PETSC_TRUE;
775: char hostname[256];
776: PetscBool blas_view_flag = PETSC_FALSE;
778: PetscFunctionBegin;
779: if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
780: /* these must be initialized in a routine, not as a constant declaration */
781: PETSC_STDOUT = stdout;
782: PETSC_STDERR = stderr;
784: /* PetscCall can be used from now */
785: PetscErrorHandlingInitialized = PETSC_TRUE;
787: /*
788: The checking over compatible runtime libraries is complicated by the MPI ABI initiative
789: https://wiki.mpich.org/mpich/index.php/ABI_Compatibility_Initiative which started with
790: MPICH v3.1 (Released February 2014)
791: IBM MPI v2.1 (December 2014)
792: Intel MPI Library v5.0 (2014)
793: Cray MPT v7.0.0 (June 2014)
794: As of July 31, 2017 the ABI number still appears to be 12, that is all of the versions
795: listed above and since that time are compatible.
797: Unfortunately the MPI ABI initiative has not defined a way to determine the ABI number
798: at compile time or runtime. Thus we will need to systematically track the allowed versions
799: and how they are represented in the mpi.h and MPI_Get_library_version() output in order
800: to perform the checking.
802: Currently we only check for pre MPI ABI versions (and packages that do not follow the MPI ABI).
804: Questions:
806: Should the checks for ABI incompatibility be only on the major version number below?
807: Presumably the output to stderr will be removed before a release.
808: */
810: #if defined(PETSC_HAVE_MPI_GET_LIBRARY_VERSION)
811: {
812: char mpilibraryversion[MPI_MAX_LIBRARY_VERSION_STRING];
813: PetscMPIInt mpilibraryversionlength;
815: PetscCallMPI(MPI_Get_library_version(mpilibraryversion, &mpilibraryversionlength));
816: /* check for MPICH versions before MPI ABI initiative */
817: #if defined(MPICH_VERSION)
818: #if MPICH_NUMVERSION < 30100000
819: {
820: char *ver, *lf;
821: PetscBool flg = PETSC_FALSE;
823: PetscCall(PetscStrstr(mpilibraryversion, "MPICH Version:", &ver));
824: if (ver) {
825: PetscCall(PetscStrchr(ver, '\n', &lf));
826: if (lf) {
827: *lf = 0;
828: PetscCall(PetscStrendswith(ver, MPICH_VERSION, &flg));
829: }
830: }
831: if (!flg) {
832: PetscCall(PetscInfo(NULL, "PETSc warning --- MPICH library version \n%s does not match what PETSc was compiled with %s.\n", mpilibraryversion, MPICH_VERSION));
833: flg = PETSC_TRUE;
834: }
835: }
836: #endif
837: /* check for Open MPI version, it is not part of the MPI ABI initiative (is it part of another initiative that needs to be handled?) */
838: #elif defined(PETSC_HAVE_OPENMPI)
839: {
840: char *ver, bs[MPI_MAX_LIBRARY_VERSION_STRING], *bsf;
841: PetscBool flg = PETSC_FALSE;
842: #define PSTRSZ 2
843: char ompistr1[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"Open MPI", "FUJITSU MPI"};
844: char ompistr2[PSTRSZ][MPI_MAX_LIBRARY_VERSION_STRING] = {"v", "Library "};
845: int i;
846: for (i = 0; i < PSTRSZ; i++) {
847: PetscCall(PetscStrstr(mpilibraryversion, ompistr1[i], &ver));
848: if (ver) {
849: PetscCall(PetscSNPrintf(bs, MPI_MAX_LIBRARY_VERSION_STRING, "%s%d.%d", ompistr2[i], PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
850: PetscCall(PetscStrstr(ver, bs, &bsf));
851: if (bsf) flg = PETSC_TRUE;
852: break;
853: }
854: }
855: if (!flg) {
856: PetscCall(PetscInfo(NULL, "PETSc warning --- Open MPI library version \n%s does not match what PETSc was compiled with %d.%d.\n", mpilibraryversion, PETSC_PKG_OPENMPI_VERSION_MAJOR, PETSC_PKG_OPENMPI_VERSION_MINOR));
857: flg = PETSC_TRUE;
858: }
859: }
860: #endif
861: }
862: #endif
864: #if defined(PETSC_HAVE_DLADDR) && !(defined(__cray__) && defined(__clang__))
865: /* These symbols are currently in the Open MPI and MPICH libraries; they may not always be, in that case the test will simply not detect the problem */
866: PetscCheck(!dlsym(RTLD_DEFAULT, "ompi_mpi_init") || !dlsym(RTLD_DEFAULT, "MPID_Abort"), PETSC_COMM_SELF, PETSC_ERR_MPI_LIB_INCOMP, "Application was linked against both Open MPI and MPICH based MPI libraries and will not run correctly");
867: #endif
869: /* on Windows - set printf to default to printing 2 digit exponents */
870: #if defined(PETSC_HAVE__SET_OUTPUT_FORMAT)
871: _set_output_format(_TWO_DIGIT_EXPONENT);
872: #endif
874: PetscCall(PetscOptionsCreateDefault());
876: PetscFinalizeCalled = PETSC_FALSE;
878: PetscCall(PetscSetProgramName(prog));
879: PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockOpen));
880: PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStdout));
881: PetscCall(PetscSpinlockCreate(&PetscViewerASCIISpinLockStderr));
882: PetscCall(PetscSpinlockCreate(&PetscCommSpinLock));
884: if (PETSC_COMM_WORLD == MPI_COMM_NULL) PETSC_COMM_WORLD = MPI_COMM_WORLD;
885: PetscCallMPI(MPI_Comm_set_errhandler(PETSC_COMM_WORLD, MPI_ERRORS_RETURN));
887: if (PETSC_MPI_ERROR_CLASS == MPI_ERR_LASTCODE) {
888: PetscCallMPI(MPI_Add_error_class(&PETSC_MPI_ERROR_CLASS));
889: PetscCallMPI(MPI_Add_error_code(PETSC_MPI_ERROR_CLASS, &PETSC_MPI_ERROR_CODE));
890: }
892: /* Done after init due to a bug in MPICH-GM? */
893: PetscCall(PetscErrorPrintfInitialize());
895: PetscCallMPI(MPI_Comm_rank(MPI_COMM_WORLD, &PetscGlobalRank));
896: PetscCallMPI(MPI_Comm_size(MPI_COMM_WORLD, &PetscGlobalSize));
898: MPIU_BOOL = MPI_INT;
899: MPIU_ENUM = MPI_INT;
900: MPIU_FORTRANADDR = (sizeof(void *) == sizeof(int)) ? MPI_INT : MPIU_INT64;
901: if (sizeof(size_t) == sizeof(unsigned)) MPIU_SIZE_T = MPI_UNSIGNED;
902: else if (sizeof(size_t) == sizeof(unsigned long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG;
903: #if defined(PETSC_SIZEOF_LONG_LONG)
904: else if (sizeof(size_t) == sizeof(unsigned long long)) MPIU_SIZE_T = MPI_UNSIGNED_LONG_LONG;
905: #endif
906: else SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_SUP_SYS, "Could not find MPI type for size_t");
908: /*
909: Initialized the global complex variable; this is because with
910: shared libraries the constructors for global variables
911: are not called; at least on IRIX.
912: */
913: #if defined(PETSC_HAVE_COMPLEX)
914: {
915: #if defined(PETSC_CLANGUAGE_CXX) && !defined(PETSC_USE_REAL___FLOAT128)
916: PetscComplex ic(0.0, 1.0);
917: PETSC_i = ic;
918: #else
919: PETSC_i = _Complex_I;
920: #endif
921: }
922: #endif /* PETSC_HAVE_COMPLEX */
924: /*
925: Create the PETSc MPI reduction operator that sums of the first
926: half of the entries and maxes the second half.
927: */
928: PetscCallMPI(MPI_Op_create(MPIU_MaxSum_Local, 1, &MPIU_MAXSUM_OP));
930: #if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
931: PetscCallMPI(MPI_Type_contiguous(2, MPI_DOUBLE, &MPIU___FLOAT128));
932: PetscCallMPI(MPI_Type_commit(&MPIU___FLOAT128));
933: #if defined(PETSC_HAVE_COMPLEX)
934: PetscCallMPI(MPI_Type_contiguous(4, MPI_DOUBLE, &MPIU___COMPLEX128));
935: PetscCallMPI(MPI_Type_commit(&MPIU___COMPLEX128));
936: #endif
937: #endif
938: #if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
939: PetscCallMPI(MPI_Type_contiguous(2, MPI_CHAR, &MPIU___FP16));
940: PetscCallMPI(MPI_Type_commit(&MPIU___FP16));
941: #endif
943: #if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
944: PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM));
945: PetscCallMPI(MPI_Op_create(PetscMax_Local, 1, &MPIU_MAX));
946: PetscCallMPI(MPI_Op_create(PetscMin_Local, 1, &MPIU_MIN));
947: #elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
948: PetscCallMPI(MPI_Op_create(PetscSum_Local, 1, &MPIU_SUM___FP16___FLOAT128));
949: #endif
951: PetscCallMPI(MPI_Type_contiguous(2, MPIU_SCALAR, &MPIU_2SCALAR));
952: PetscCallMPI(MPI_Op_create(PetscGarbageKeySortedIntersect, 1, &Petsc_Garbage_SetIntersectOp));
953: PetscCallMPI(MPI_Type_commit(&MPIU_2SCALAR));
955: /* create datatypes used by MPIU_MAXLOC, MPIU_MINLOC and PetscSplitReduction_Op */
956: #if !defined(PETSC_HAVE_MPIUNI)
957: {
958: PetscMPIInt blockSizes[2] = {1, 1};
959: MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_real_int, v), offsetof(struct petsc_mpiu_real_int, i)};
960: MPI_Datatype blockTypes[2] = {MPIU_REAL, MPIU_INT}, tmpStruct;
962: PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
963: PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_real_int), &MPIU_REAL_INT));
964: PetscCallMPI(MPI_Type_free(&tmpStruct));
965: PetscCallMPI(MPI_Type_commit(&MPIU_REAL_INT));
966: }
967: {
968: PetscMPIInt blockSizes[2] = {1, 1};
969: MPI_Aint blockOffsets[2] = {offsetof(struct petsc_mpiu_scalar_int, v), offsetof(struct petsc_mpiu_scalar_int, i)};
970: MPI_Datatype blockTypes[2] = {MPIU_SCALAR, MPIU_INT}, tmpStruct;
972: PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
973: PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_scalar_int), &MPIU_SCALAR_INT));
974: PetscCallMPI(MPI_Type_free(&tmpStruct));
975: PetscCallMPI(MPI_Type_commit(&MPIU_SCALAR_INT));
976: }
977: #endif
979: #if defined(PETSC_USE_64BIT_INDICES)
980: PetscCallMPI(MPI_Type_contiguous(2, MPIU_INT, &MPIU_2INT));
981: PetscCallMPI(MPI_Type_commit(&MPIU_2INT));
983: #if !defined(PETSC_HAVE_MPIUNI)
984: {
985: int blockSizes[] = {1, 1};
986: MPI_Aint blockOffsets[] = {offsetof(struct petsc_mpiu_int_mpiint, a), offsetof(struct petsc_mpiu_int_mpiint, b)};
987: MPI_Datatype blockTypes[] = {MPIU_INT, MPI_INT}, tmpStruct;
989: PetscCallMPI(MPI_Type_create_struct(2, blockSizes, blockOffsets, blockTypes, &tmpStruct));
990: PetscCallMPI(MPI_Type_create_resized(tmpStruct, 0, sizeof(struct petsc_mpiu_int_mpiint), &MPIU_INT_MPIINT));
991: PetscCallMPI(MPI_Type_free(&tmpStruct));
992: PetscCallMPI(MPI_Type_commit(&MPIU_INT_MPIINT));
993: }
994: #endif
995: #endif
996: PetscCallMPI(MPI_Type_contiguous(4, MPI_INT, &MPI_4INT));
997: PetscCallMPI(MPI_Type_commit(&MPI_4INT));
998: PetscCallMPI(MPI_Type_contiguous(4, MPIU_INT, &MPIU_4INT));
999: PetscCallMPI(MPI_Type_commit(&MPIU_4INT));
1001: /*
1002: Attributes to be set on PETSc communicators
1003: */
1004: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_Counter_Attr_DeleteFn, &Petsc_Counter_keyval, NULL));
1005: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_InnerComm_Attr_DeleteFn, &Petsc_InnerComm_keyval, NULL));
1006: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_OuterComm_Attr_DeleteFn, &Petsc_OuterComm_keyval, NULL));
1007: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, Petsc_ShmComm_Attr_DeleteFn, &Petsc_ShmComm_keyval, NULL));
1008: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_CreationIdx_keyval, NULL));
1009: PetscCallMPI(MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Garbage_HMap_keyval, NULL));
1011: #if defined(PETSC_USE_FORTRAN_BINDINGS)
1012: if (ftn) PetscCall(PetscInitFortran_Private(readarguments, file, len));
1013: else
1014: #endif
1015: PetscCall(PetscOptionsInsert(NULL, &PetscGlobalArgc, &PetscGlobalArgs, file));
1017: if (PetscDefined(HAVE_MPIUNI)) {
1018: const char *mpienv = getenv("PMI_SIZE");
1019: if (!mpienv) mpienv = getenv("OMPI_COMM_WORLD_SIZE");
1020: if (mpienv) {
1021: PetscInt isize;
1022: PetscBool mflag = PETSC_FALSE;
1024: PetscCall(PetscOptionsStringToInt(mpienv, &isize));
1025: PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpiuni-allow-multiprocess-launch", &mflag, NULL));
1026: PetscCheck(isize == 1 || mflag, MPI_COMM_SELF, PETSC_ERR_MPI, "You are using an MPI-uni (sequential) install of PETSc but trying to launch parallel jobs; you need full MPI version of PETSc. Or run with -mpiuni-allow-multiprocess-launch to allow multiple independent MPI-uni jobs.");
1027: }
1028: }
1030: /* call a second time so it can look in the options database */
1031: PetscCall(PetscErrorPrintfInitialize());
1033: /*
1034: Check system options and print help
1035: */
1036: PetscCall(PetscOptionsCheckInitial_Private(help));
1038: /*
1039: Creates the logging data structures; this is enabled even if logging is not turned on
1040: This is the last thing we do before returning to the user code to prevent having the
1041: logging numbers contaminated by any startup time associated with MPI
1042: */
1043: PetscCall(PetscLogInitialize());
1045: /*
1046: Initialize PetscDevice and PetscDeviceContext
1048: Note to any future devs thinking of moving this, proper initialization requires:
1049: 1. MPI initialized
1050: 2. Options DB initialized
1051: 3. Petsc error handling initialized, specifically signal handlers. This expects to set up
1052: its own SIGSEV handler via the push/pop interface.
1053: 4. Logging initialized
1054: */
1055: PetscCall(PetscDeviceInitializeFromOptions_Internal(PETSC_COMM_WORLD));
1057: #if PetscDefined(HAVE_VIENNACL)
1058: flg = PETSC_FALSE;
1059: PetscCall(PetscOptionsHasName(NULL, NULL, "-log_view", &flg));
1060: if (!flg) PetscCall(PetscOptionsGetBool(NULL, NULL, "-viennacl_synchronize", &flg, NULL));
1061: PetscViennaCLSynchronize = flg;
1062: PetscCall(PetscViennaCLInit());
1063: #endif
1065: PetscCall(PetscCitationsInitialize());
1067: #if defined(PETSC_HAVE_SAWS)
1068: PetscCall(PetscInitializeSAWs(ftn ? NULL : help));
1069: flg = PETSC_FALSE;
1070: PetscCall(PetscOptionsHasName(NULL, NULL, "-stack_view", &flg));
1071: if (flg) PetscCall(PetscStackViewSAWs());
1072: #endif
1074: /*
1075: Load the dynamic libraries (on machines that support them), this registers all
1076: the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
1077: */
1078: PetscCall(PetscInitialize_DynamicLibraries());
1080: PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
1081: PetscCall(PetscInfo(NULL, "PETSc successfully started: number of processors = %d\n", size));
1082: PetscCall(PetscGetHostName(hostname, sizeof(hostname)));
1083: PetscCall(PetscInfo(NULL, "Running on machine: %s\n", hostname));
1084: #if defined(PETSC_HAVE_OPENMP)
1085: {
1086: PetscBool omp_view_flag;
1087: char *threads = getenv("OMP_NUM_THREADS");
1089: if (threads) {
1090: PetscCall(PetscInfo(NULL, "Number of OpenMP threads %s (as given by OMP_NUM_THREADS)\n", threads));
1091: (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumOMPThreads);
1092: } else {
1093: PetscNumOMPThreads = (PetscInt)omp_get_max_threads();
1094: PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (as given by omp_get_max_threads())\n", PetscNumOMPThreads));
1095: }
1096: PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "OpenMP options", "Sys");
1097: PetscCall(PetscOptionsInt("-omp_num_threads", "Number of OpenMP threads to use (can also use environmental variable OMP_NUM_THREADS", "None", PetscNumOMPThreads, &PetscNumOMPThreads, &flg));
1098: PetscCall(PetscOptionsName("-omp_view", "Display OpenMP number of threads", NULL, &omp_view_flag));
1099: PetscOptionsEnd();
1100: if (flg) {
1101: PetscCall(PetscInfo(NULL, "Number of OpenMP threads %" PetscInt_FMT " (given by -omp_num_threads)\n", PetscNumOMPThreads));
1102: omp_set_num_threads((int)PetscNumOMPThreads);
1103: }
1104: if (omp_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "OpenMP: number of threads %" PetscInt_FMT "\n", PetscNumOMPThreads));
1105: }
1106: #endif
1108: PetscOptionsBegin(PETSC_COMM_WORLD, NULL, "BLAS options", "Sys");
1109: PetscCall(PetscOptionsName("-blas_view", "Display number of threads to use for BLAS operations", NULL, &blas_view_flag));
1110: #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS) || defined(PETSC_HAVE_MKL_SET_NUM_THREADS) || defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1111: {
1112: char *threads = NULL;
1114: /* determine any default number of threads requested in the environment; TODO: Apple libraries? */
1115: #if defined(PETSC_HAVE_BLI_THREAD_SET_NUM_THREADS)
1116: threads = getenv("BLIS_NUM_THREADS");
1117: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by BLIS_NUM_THREADS\n", threads));
1118: if (!threads) {
1119: threads = getenv("OMP_NUM_THREADS");
1120: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of BLIS threads %s given by OMP_NUM_THREADS\n", threads));
1121: }
1122: #elif defined(PETSC_HAVE_MKL_SET_NUM_THREADS)
1123: threads = getenv("MKL_NUM_THREADS");
1124: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by MKL_NUM_THREADS\n", threads));
1125: if (!threads) {
1126: threads = getenv("OMP_NUM_THREADS");
1127: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of MKL threads %s given by OMP_NUM_THREADS\n", threads));
1128: }
1129: #elif defined(PETSC_HAVE_OPENBLAS_SET_NUM_THREADS)
1130: threads = getenv("OPENBLAS_NUM_THREADS");
1131: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OPENBLAS_NUM_THREADS\n", threads));
1132: if (!threads) {
1133: threads = getenv("OMP_NUM_THREADS");
1134: if (threads) PetscCall(PetscInfo(NULL, "BLAS: Environment number of OpenBLAS threads %s given by OMP_NUM_THREADS\n", threads));
1135: }
1136: #endif
1137: if (threads) (void)sscanf(threads, "%" PetscInt_FMT, &PetscNumBLASThreads);
1138: PetscCall(PetscOptionsInt("-blas_num_threads", "Number of threads to use for BLAS operations", "None", PetscNumBLASThreads, &PetscNumBLASThreads, &flg));
1139: if (flg) PetscCall(PetscInfo(NULL, "BLAS: Command line number of BLAS thread %" PetscInt_FMT "given by -blas_num_threads\n", PetscNumBLASThreads));
1140: if (flg || threads) {
1141: PetscCall(PetscBLASSetNumThreads(PetscNumBLASThreads));
1142: if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: number of threads %" PetscInt_FMT "\n", PetscNumBLASThreads));
1143: }
1144: }
1145: #elif defined(PETSC_HAVE_APPLE_ACCELERATE)
1146: PetscCall(PetscInfo(NULL, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1147: if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: Apple Accelerate library, thread support with no user control\n"));
1148: #else
1149: if (blas_view_flag) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "BLAS: no thread support\n"));
1150: #endif
1151: PetscOptionsEnd();
1153: #if defined(PETSC_USE_PETSC_MPI_EXTERNAL32)
1154: /*
1155: Tell MPI about our own data representation converter, this would/should be used if extern32 is not supported by the MPI
1157: Currently not used because it is not supported by MPICH.
1158: */
1159: if (!PetscBinaryBigEndian()) PetscCallMPI(MPI_Register_datarep((char *)"petsc", PetscDataRep_read_conv_fn, PetscDataRep_write_conv_fn, PetscDataRep_extent_fn, NULL));
1160: #endif
1162: #if defined(PETSC_SERIALIZE_FUNCTIONS)
1163: PetscCall(PetscFPTCreate(10000));
1164: #endif
1166: #if defined(PETSC_HAVE_HWLOC)
1167: {
1168: PetscViewer viewer;
1169: PetscCall(PetscOptionsCreateViewer(PETSC_COMM_WORLD, NULL, NULL, "-process_view", &viewer, NULL, &flg));
1170: if (flg) {
1171: PetscCall(PetscProcessPlacementView(viewer));
1172: PetscCall(PetscViewerDestroy(&viewer));
1173: }
1174: }
1175: #endif
1177: flg = PETSC_TRUE;
1178: PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewfromoptions", &flg, NULL));
1179: if (!flg) PetscCall(PetscOptionsPushCreateViewerOff(PETSC_TRUE));
1181: #if defined(PETSC_HAVE_ADIOS)
1182: PetscCallExternal(adios_init_noxml, PETSC_COMM_WORLD);
1183: PetscCallExternal(adios_declare_group, &Petsc_adios_group, "PETSc", "", adios_stat_default);
1184: PetscCallExternal(adios_select_method, Petsc_adios_group, "MPI", "", "");
1185: PetscCallExternal(adios_read_init_method, ADIOS_READ_METHOD_BP, PETSC_COMM_WORLD, "");
1186: #endif
1188: #if defined(__VALGRIND_H)
1189: PETSC_RUNNING_ON_VALGRIND = RUNNING_ON_VALGRIND ? PETSC_TRUE : PETSC_FALSE;
1190: #if defined(PETSC_USING_DARWIN) && defined(PETSC_BLASLAPACK_SDOT_RETURNS_DOUBLE)
1191: if (PETSC_RUNNING_ON_VALGRIND) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING: Running valgrind with the macOS native BLAS and LAPACK can fail. If it fails, try configuring with --download-fblaslapack or --download-f2cblaslapack"));
1192: #endif
1193: #endif
1194: /*
1195: Set flag that we are completely initialized
1196: */
1197: PetscInitializeCalled = PETSC_TRUE;
1199: PetscCall(PetscOptionsHasName(NULL, NULL, "-python", &flg));
1200: if (flg) PetscCall(PetscPythonInitialize(NULL, NULL));
1202: PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1203: if (flg) PetscCall(PetscInfo(NULL, "Running MPI Linear Solver Server\n"));
1204: if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerBegin());
1205: else PetscCheck(!flg, PETSC_COMM_WORLD, PETSC_ERR_SUP, "PETSc configured using -with-single-library=0; -mpi_linear_solver_server not supported in that case");
1206: PetscFunctionReturn(PETSC_SUCCESS);
1207: }
1209: // "Unknown section 'Environmental Variables'"
1210: // PetscClangLinter pragma disable: -fdoc-section-header-unknown
1211: /*@C
1212: PetscInitialize - Initializes the PETSc database and MPI.
1213: `PetscInitialize()` calls MPI_Init() if that has yet to be called,
1214: so this routine should always be called near the beginning of
1215: your program -- usually the very first line!
1217: Collective on `MPI_COMM_WORLD` or `PETSC_COMM_WORLD` if it has been set
1219: Input Parameters:
1220: + argc - count of number of command line arguments
1221: . args - the command line arguments
1222: . file - [optional] PETSc database file, append ":yaml" to filename to specify YAML options format.
1223: Use NULL or empty string to not check for code specific file.
1224: Also checks ~/.petscrc, .petscrc and petscrc.
1225: Use -skip_petscrc in the code specific file (or command line) to skip ~/.petscrc, .petscrc and petscrc files.
1226: - help - [optional] Help message to print, use NULL for no message
1228: If you wish PETSc code to run ONLY on a subcommunicator of `MPI_COMM_WORLD`, create that
1229: communicator first and assign it to `PETSC_COMM_WORLD` BEFORE calling `PetscInitialize()`. Thus if you are running a
1230:   four process job and two processes will run PETSc and have `PetscInitialize()` and `PetscFinalize()` and two processes will not,
1231: then do this. If ALL processes in the job are using `PetscInitialize()` and `PetscFinalize()` then you don't need to do this, even
1232: if different subcommunicators of the job are doing different things with PETSc.
1234: Options Database Keys:
1235: + -help [intro] - prints help method for each option; if intro is given the program stops after printing the introductory help message
1236: . -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger
1237: . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
1238: . -on_error_emacs <machinename> - causes emacsclient to jump to error file
1239: . -on_error_abort - calls `abort()` when error detected (no traceback)
1240: . -on_error_mpiabort - calls `MPI_abort()` when error detected
1241: . -error_output_stdout - prints PETSc error messages to stdout instead of the default stderr
1242: . -error_output_none - does not print the error messages (but handles errors in the same way as if this was not called)
1243: . -debugger_ranks [rank1,rank2,...] - Indicates ranks to start in debugger
1244: . -debugger_pause [sleeptime] (in seconds) - Pauses debugger
1245: . -stop_for_debugger - Print message on how to attach debugger manually to
1246: process and wait (-debugger_pause) seconds for attachment
1247: . -malloc_dump - prints a list of all unfreed memory at the end of the run
1248: . -malloc_test - like -malloc_dump -malloc_debug, only active for debugging build, ignored in optimized build. Often set in PETSC_OPTIONS environmental variable
1249: . -malloc_view - show a list of all allocated memory during `PetscFinalize()`
1250: . -malloc_view_threshold <t> - only list memory allocations of size greater than t with -malloc_view
1251: . -malloc_requested_size - malloc logging will record the requested size rather than size after alignment
1252: . -fp_trap - Stops on floating point exceptions
1253: . -no_signal_handler - Indicates not to trap error signals
1254: . -shared_tmp - indicates /tmp directory is shared by all processors
1255: . -not_shared_tmp - each processor has own /tmp
1256: . -tmp - alternative name of /tmp directory
1257: - -mpiuni-allow-multiprocess-launch - allow mpiexec to launch multiple independent MPI-Uni jobs, otherwise a sanity check error is invoked to prevent misuse of MPI-Uni
1259: Options Database Keys for Option Database:
1260: + -skip_petscrc - skip the default option files ~/.petscrc, .petscrc, petscrc
1261: . -options_monitor - monitor all set options to standard output for the whole program run
1262: - -options_monitor_cancel - cancel options monitoring hard-wired using `PetscOptionsMonitorSet()`
1264: Options -options_monitor_{all,cancel} are
1265: position-independent and apply to all options set since the PETSc start.
1266: They can be used also in option files.
1268: See `PetscOptionsMonitorSet()` to do monitoring programmatically.
1270: Options Database Keys for Profiling:
1271: See Users-Manual: ch_profiling for details.
1272: + -info [filename][:[~]<list,of,classnames>[:[~]self]] - Prints verbose information. See `PetscInfo()`.
1273: . -log_sync - Enable barrier synchronization for all events. This option is useful to debug imbalance within each event,
1274: however it slows things down and gives a distorted view of the overall runtime.
1275: . -log_trace [filename] - Print traces of all PETSc calls to the screen (useful to determine where a program
1276: hangs without running in the debugger). See `PetscLogTraceBegin()`.
1277: . -log_view [:filename:format][,[:filename:format]...] - Prints summary of flop and timing information to screen or file, see `PetscLogView()` (up to 4 viewers)
1278: . -log_view_memory - Includes in the summary from -log_view the memory used in each event, see `PetscLogView()`.
1279: . -log_view_gpu_time - Includes in the summary from -log_view the time used in each GPU kernel, see `PetscLogView()`.
1280: . -log_exclude: <vec,mat,pc,ksp,snes> - excludes subset of object classes from logging
1281: . -log [filename] - Logs profiling information in a dump file, see `PetscLogDump()`.
1282: . -log_all [filename] - Same as `-log`.
1283: . -log_mpe [filename] - Creates a logfile viewable by the utility Jumpshot (in MPICH distribution)
1284: . -log_perfstubs - Starts a log handler with the perfstubs interface (which is used by TAU)
1285: . -log_nvtx - Starts an nvtx log handler for use with Nsight
1286: . -viewfromoptions on,off - Enable or disable `XXXSetFromOptions()` calls, for applications with many small solves turn this off
1287: . -get_total_flops - Returns total flops done by all processors
1288: . -memory_view - Print memory usage at end of run
1289: - -check_pointer_intensity 0,1,2 - if pointers are checked for validity (debug version only), using 0 will result in faster code
1291: Options Database Keys for SAWs:
1292: + -saws_port <portnumber> - port number to publish SAWs data, default is 8080
1293: . -saws_port_auto_select - have SAWs select a new unique port number where it publishes the data, the URL is printed to the screen
1294: this is useful when you are running many jobs that utilize SAWs at the same time
1295: . -saws_log <filename> - save a log of all SAWs communication
1296: . -saws_https <certificate file> - have SAWs use HTTPS instead of HTTP
1297: - -saws_root <directory> - allow SAWs to have access to the given directory to search for requested resources and files
1299: Environmental Variables:
1300: + `PETSC_TMP` - alternative tmp directory
1301: . `PETSC_SHARED_TMP` - tmp is shared by all processes
1302: . `PETSC_NOT_SHARED_TMP` - each process has its own private tmp
1303: . `PETSC_OPTIONS` - a string containing additional options for petsc in the form of command line "-key value" pairs
1304: . `PETSC_OPTIONS_YAML` - (requires configuring PETSc to use libyaml) a string containing additional options for petsc in the form of a YAML document
1305: . `PETSC_VIEWER_SOCKET_PORT` - socket number to use for socket viewer
1306: - `PETSC_VIEWER_SOCKET_MACHINE` - machine to use for socket viewer to connect to
1308: Level: beginner
1310: Note:
1311: If for some reason you must call `MPI_Init()` separately, call
1312: it before `PetscInitialize()`.
1314: Fortran Notes:
1315: In Fortran this routine can be called with
1316: .vb
1317: call PetscInitialize(ierr)
1318: call PetscInitialize(file,ierr) or
1319: call PetscInitialize(file,help,ierr)
1320: .ve
1322: If your main program is C but you call Fortran code that also uses PETSc you need to call `PetscInitializeFortran()` soon after
1323: calling `PetscInitialize()`.
1325: Options Database Key for Developers:
1326: . -checkfunctionlist - automatically checks that function lists associated with objects are correctly cleaned up. Produces messages of the form:
1327: "function name: MatInodeGetInodeSizes_C" if they are not cleaned up. This flag is always set for the test harness (in framework.py)
1329: .seealso: `PetscFinalize()`, `PetscInitializeFortran()`, `PetscGetArgs()`, `PetscInitializeNoArguments()`, `PetscLogGpuTime()`
1330: @*/
1331: PetscErrorCode PetscInitialize(int *argc, char ***args, const char file[], const char help[])
1332: {
1333: PetscMPIInt flag;
1334: const char *prog = "Unknown Name";
1336: PetscFunctionBegin;
1337: if (PetscInitializeCalled) PetscFunctionReturn(PETSC_SUCCESS);
1338: PetscCallMPI(MPI_Initialized(&flag));
1339: if (!flag) {
1340: PetscCheck(PETSC_COMM_WORLD == MPI_COMM_NULL, PETSC_COMM_SELF, PETSC_ERR_SUP, "You cannot set PETSC_COMM_WORLD if you have not initialized MPI first");
1341: PetscCall(PetscPreMPIInit_Private());
1342: #if defined(PETSC_HAVE_MPI_INIT_THREAD)
1343: {
1344: PetscMPIInt provided;
1345: PetscCallMPI(MPI_Init_thread(argc, args, PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE ? MPI_THREAD_FUNNELED : PETSC_MPI_THREAD_REQUIRED, &provided));
1346: PetscCheck(PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE || provided >= PETSC_MPI_THREAD_REQUIRED, PETSC_COMM_SELF, PETSC_ERR_MPI, "The MPI implementation's provided thread level is less than what you required");
1347: if (PETSC_MPI_THREAD_REQUIRED == PETSC_DECIDE) PETSC_MPI_THREAD_REQUIRED = MPI_THREAD_FUNNELED; // assign it a valid value after check-up
1348: }
1349: #else
1350: PetscCallMPI(MPI_Init(argc, args));
1351: #endif
1352: PetscBeganMPI = PETSC_TRUE;
1353: }
1355: if (argc && *argc) prog = **args;
1356: if (argc && args) {
1357: PetscGlobalArgc = *argc;
1358: PetscGlobalArgs = *args;
1359: }
1360: PetscCall(PetscInitialize_Common(prog, file, help, PETSC_FALSE, PETSC_FALSE, 0));
1361: PetscFunctionReturn(PETSC_SUCCESS);
1362: }
1364: PETSC_INTERN PetscObject *PetscObjects;
1365: PETSC_INTERN PetscInt PetscObjectsCounts;
1366: PETSC_INTERN PetscInt PetscObjectsMaxCounts;
1367: PETSC_INTERN PetscBool PetscObjectsLog;
1369: /*
1370: Frees all the MPI types and operations that PETSc may have created
1371: */
PetscErrorCode PetscFreeMPIResources(void)
{
  PetscFunctionBegin;
  /* Each free below mirrors a matching MPI_Type_contiguous/MPI_Type_create_struct or
     MPI_Op_create issued during PetscInitialize_Common(); the preprocessor guards must
     stay in lockstep with the guards used at creation time. */
#if defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)
  PetscCallMPI(MPI_Type_free(&MPIU___FLOAT128));
#if defined(PETSC_HAVE_COMPLEX)
  PetscCallMPI(MPI_Type_free(&MPIU___COMPLEX128));
#endif
#endif
#if defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16)
  PetscCallMPI(MPI_Type_free(&MPIU___FP16));
#endif

  /* Reduction operators: either the full SUM/MAX/MIN set (when PETSc computes in
     __float128/__fp16) or the single combined op (when those types are merely available) */
#if defined(PETSC_USE_REAL___FLOAT128) || defined(PETSC_USE_REAL___FP16)
  PetscCallMPI(MPI_Op_free(&MPIU_SUM));
  PetscCallMPI(MPI_Op_free(&MPIU_MAX));
  PetscCallMPI(MPI_Op_free(&MPIU_MIN));
#elif (defined(PETSC_HAVE_REAL___FLOAT128) && !defined(PETSC_SKIP_REAL___FLOAT128)) || (defined(PETSC_HAVE_REAL___FP16) && !defined(PETSC_SKIP_REAL___FP16))
  PetscCallMPI(MPI_Op_free(&MPIU_SUM___FP16___FLOAT128));
#endif

  /* Pair/struct datatypes used by MPIU_MAXLOC/MPIU_MINLOC style reductions */
  PetscCallMPI(MPI_Type_free(&MPIU_2SCALAR));
  PetscCallMPI(MPI_Type_free(&MPIU_REAL_INT));
  PetscCallMPI(MPI_Type_free(&MPIU_SCALAR_INT));
#if defined(PETSC_USE_64BIT_INDICES)
  /* only created with 64-bit indices; with 32-bit indices MPIU_2INT aliases a built-in type */
  PetscCallMPI(MPI_Type_free(&MPIU_2INT));
  PetscCallMPI(MPI_Type_free(&MPIU_INT_MPIINT));
#endif
  PetscCallMPI(MPI_Type_free(&MPI_4INT));
  PetscCallMPI(MPI_Type_free(&MPIU_4INT));
  PetscCallMPI(MPI_Op_free(&MPIU_MAXSUM_OP));
  PetscCallMPI(MPI_Op_free(&Petsc_Garbage_SetIntersectOp));
  PetscFunctionReturn(PETSC_SUCCESS);
}
1407: PETSC_INTERN PetscErrorCode PetscLogFinalize(void);
1408: PETSC_EXTERN PetscErrorCode PetscFreeAlign(void *, int, const char[], const char[]);
1410: /*@
1411: PetscFinalize - Checks for options to be called at the conclusion
1412: of the program. `MPI_Finalize()` is called only if the user had not
1413: called `MPI_Init()` before calling `PetscInitialize()`.
1415: Collective on `PETSC_COMM_WORLD`
1417: Options Database Keys:
1418: + -options_view - Calls `PetscOptionsView()`
1419: . -options_left - Prints unused options that remain in the database
1420: . -objects_dump [all] - Prints list of objects allocated by the user that have not been freed, the option all causes all outstanding objects to be listed
1421: . -mpidump - Calls PetscMPIDump()
1422: . -malloc_dump <optional filename> - Calls `PetscMallocDump()`, displays all memory allocated that has not been freed
1423: . -memory_view - Prints total memory usage
1424: - -malloc_view <optional filename> - Prints list of all memory allocated and in what functions
1426: Level: beginner
1428: Note:
1429: See `PetscInitialize()` for other runtime options.
1431: .seealso: `PetscInitialize()`, `PetscOptionsView()`, `PetscMallocDump()`, `PetscMPIDump()`, `PetscEnd()`
1432: @*/
1433: PetscErrorCode PetscFinalize(void)
1434: {
1435: PetscMPIInt rank;
1436: PetscInt nopt;
1437: PetscBool flg1 = PETSC_FALSE, flg2 = PETSC_FALSE, flg3 = PETSC_FALSE;
1438: PetscBool flg;
1439: char mname[PETSC_MAX_PATH_LEN];
1441: PetscFunctionBegin;
1442: PetscCheck(PetscInitializeCalled, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "PetscInitialize() must be called before PetscFinalize()");
1443: PetscCall(PetscInfo(NULL, "PetscFinalize() called\n"));
1445: PetscCall(PetscOptionsHasName(NULL, NULL, "-mpi_linear_solver_server", &flg));
1446: if (PetscDefined(USE_SINGLE_LIBRARY) && flg) PetscCall(PCMPIServerEnd());
1448: PetscCall(PetscFreeAlign(PetscGlobalArgsFortran, 0, NULL, NULL));
1449: PetscGlobalArgc = 0;
1450: PetscGlobalArgs = NULL;
1452: /* Clean up Garbage automatically on COMM_SELF and COMM_WORLD at finalize */
1453: {
1454: union
1455: {
1456: MPI_Comm comm;
1457: void *ptr;
1458: } ucomm;
1459: PetscMPIInt flg;
1460: void *tmp;
1462: PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
1463: if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
1464: if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_SELF));
1465: PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
1466: if (flg) PetscCallMPI(MPI_Comm_get_attr(ucomm.comm, Petsc_Garbage_HMap_keyval, &tmp, &flg));
1467: if (flg) PetscCall(PetscGarbageCleanup(PETSC_COMM_WORLD));
1468: }
1470: PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
1471: #if defined(PETSC_HAVE_ADIOS)
1472: PetscCallExternal(adios_read_finalize_method, ADIOS_READ_METHOD_BP_AGGREGATE);
1473: PetscCallExternal(adios_finalize, rank);
1474: #endif
1475: PetscCall(PetscOptionsHasName(NULL, NULL, "-citations", &flg));
1476: if (flg) {
1477: char *cits, filename[PETSC_MAX_PATH_LEN];
1478: FILE *fd = PETSC_STDOUT;
1480: PetscCall(PetscOptionsGetString(NULL, NULL, "-citations", filename, sizeof(filename), NULL));
1481: if (filename[0]) PetscCall(PetscFOpen(PETSC_COMM_WORLD, filename, "w", &fd));
1482: PetscCall(PetscSegBufferGet(PetscCitationsList, 1, &cits));
1483: cits[0] = 0;
1484: PetscCall(PetscSegBufferExtractAlloc(PetscCitationsList, &cits));
1485: PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "If you publish results based on this computation please cite the following:\n"));
1486: PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
1487: PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "%s", cits));
1488: PetscCall(PetscFPrintf(PETSC_COMM_WORLD, fd, "===========================================================================\n"));
1489: PetscCall(PetscFClose(PETSC_COMM_WORLD, fd));
1490: PetscCall(PetscFree(cits));
1491: }
1492: PetscCall(PetscSegBufferDestroy(&PetscCitationsList));
1494: #if defined(PETSC_SERIALIZE_FUNCTIONS)
1495: PetscCall(PetscFPTDestroy());
1496: #endif
1498: #if defined(PETSC_HAVE_SAWS)
1499: flg = PETSC_FALSE;
1500: PetscCall(PetscOptionsGetBool(NULL, NULL, "-saw_options", &flg, NULL));
1501: if (flg) PetscCall(PetscOptionsSAWsDestroy());
1502: #endif
1504: #if defined(PETSC_HAVE_X)
1505: flg1 = PETSC_FALSE;
1506: PetscCall(PetscOptionsGetBool(NULL, NULL, "-x_virtual", &flg1, NULL));
1507: if (flg1) {
1508: /* this is a crude hack, but better than nothing */
1509: PetscCall(PetscPOpen(PETSC_COMM_WORLD, NULL, "pkill -15 Xvfb", "r", NULL));
1510: }
1511: #endif
1513: #if !defined(PETSC_HAVE_THREADSAFETY)
1514: PetscCall(PetscOptionsGetBool(NULL, NULL, "-memory_view", &flg2, NULL));
1515: if (flg2) PetscCall(PetscMemoryView(PETSC_VIEWER_STDOUT_WORLD, "Summary of Memory Usage in PETSc\n"));
1516: #endif
1518: if (PetscDefined(USE_LOG)) {
1519: flg1 = PETSC_FALSE;
1520: PetscCall(PetscOptionsGetBool(NULL, NULL, "-get_total_flops", &flg1, NULL));
1521: if (flg1) {
1522: PetscLogDouble flops = 0;
1523: PetscCallMPI(MPI_Reduce(&petsc_TotalFlops, &flops, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
1524: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Total flops over all processors %g\n", flops));
1525: }
1526: }
1528: if (PetscDefined(USE_LOG) && PetscDefined(HAVE_MPE)) {
1529: mname[0] = 0;
1530: PetscCall(PetscOptionsGetString(NULL, NULL, "-log_mpe", mname, sizeof(mname), &flg1));
1531: if (flg1) PetscCall(PetscLogMPEDump(mname[0] ? mname : NULL));
1532: }
1534: #if defined(PETSC_HAVE_KOKKOS)
1535: // Free petsc/kokkos stuff before the potentially non-null petsc default gpu stream is destroyed by PetscObjectRegisterDestroyAll
1536: if (PetscKokkosInitialized) {
1537: PetscCall(PetscKokkosFinalize_Private());
1538: PetscKokkosInitialized = PETSC_FALSE;
1539: }
1540: #endif
1542: // Free all objects registered with PetscObjectRegisterDestroy() such as PETSC_VIEWER_XXX_().
1543: PetscCall(PetscObjectRegisterDestroyAll());
1545: if (PetscDefined(USE_LOG)) {
1546: PetscCall(PetscOptionsPushCreateViewerOff(PETSC_FALSE));
1547: PetscCall(PetscLogViewFromOptions());
1548: PetscCall(PetscOptionsPopCreateViewerOff());
1549: // It should be turned on with PetscLogGpuTime() and never turned off except in this place
1550: PetscLogGpuTimeFlag = PETSC_FALSE;
1552: // Free any objects created by the last block of code.
1553: PetscCall(PetscObjectRegisterDestroyAll());
1555: mname[0] = 0;
1556: PetscCall(PetscOptionsGetString(NULL, NULL, "-log_all", mname, sizeof(mname), &flg1));
1557: PetscCall(PetscOptionsGetString(NULL, NULL, "-log", mname, sizeof(mname), &flg2));
1558: if (flg1 || flg2) PetscCall(PetscLogDump(mname));
1559: }
1561: flg1 = PETSC_FALSE;
1562: PetscCall(PetscOptionsGetBool(NULL, NULL, "-no_signal_handler", &flg1, NULL));
1563: if (!flg1) PetscCall(PetscPopSignalHandler());
1564: flg1 = PETSC_FALSE;
1565: PetscCall(PetscOptionsGetBool(NULL, NULL, "-mpidump", &flg1, NULL));
1566: if (flg1) PetscCall(PetscMPIDump(stdout));
1567: flg1 = PETSC_FALSE;
1568: flg2 = PETSC_FALSE;
1569: /* preemptive call to avoid listing this option in options table as unused */
1570: PetscCall(PetscOptionsHasName(NULL, NULL, "-malloc_dump", &flg1));
1571: PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
1572: PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_view", &flg2, NULL));
1574: if (flg2) { PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD)); }
1576: /* to prevent PETSc -options_left from warning */
1577: PetscCall(PetscOptionsHasName(NULL, NULL, "-nox", &flg1));
1578: PetscCall(PetscOptionsHasName(NULL, NULL, "-nox_warning", &flg1));
1580: flg3 = PETSC_FALSE; /* default value is required */
1581: PetscCall(PetscOptionsGetBool(NULL, NULL, "-options_left", &flg3, &flg1));
1582: if (!flg1) flg3 = PETSC_TRUE;
1583: if (flg3) {
1584: if (!flg2 && flg1) { /* have not yet printed the options */
1585: PetscCall(PetscOptionsView(NULL, PETSC_VIEWER_STDOUT_WORLD));
1586: }
1587: PetscCall(PetscOptionsAllUsed(NULL, &nopt));
1588: if (nopt) {
1589: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! There are options you set that were not used!\n"));
1590: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "WARNING! could be spelling mistake, etc!\n"));
1591: if (nopt == 1) {
1592: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There is one unused database option. It is:\n"));
1593: } else {
1594: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are %" PetscInt_FMT " unused database options. They are:\n", nopt));
1595: }
1596: } else if (flg3 && flg1) {
1597: PetscCall(PetscPrintf(PETSC_COMM_WORLD, "There are no unused options.\n"));
1598: }
1599: PetscCall(PetscOptionsLeft(NULL));
1600: }
1602: #if defined(PETSC_HAVE_SAWS)
1603: if (!PetscGlobalRank) {
1604: PetscCall(PetscStackSAWsViewOff());
1605: PetscCallSAWs(SAWs_Finalize, ());
1606: }
1607: #endif
1609: /*
1610: List all objects the user may have forgot to free
1611: */
1612: if (PetscDefined(USE_LOG) && PetscObjectsLog) {
1613: PetscCall(PetscOptionsHasName(NULL, NULL, "-objects_dump", &flg1));
1614: if (flg1) {
1615: MPI_Comm local_comm;
1616: char string[64];
1618: PetscCall(PetscOptionsGetString(NULL, NULL, "-objects_dump", string, sizeof(string), NULL));
1619: PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1620: PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1621: PetscCall(PetscObjectsDump(stdout, (string[0] == 'a') ? PETSC_TRUE : PETSC_FALSE));
1622: PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1623: PetscCallMPI(MPI_Comm_free(&local_comm));
1624: }
1625: }
1627: PetscObjectsCounts = 0;
1628: PetscObjectsMaxCounts = 0;
1629: PetscCall(PetscFree(PetscObjects));
1631: /*
1632: Destroy any packages that registered a finalize
1633: */
1634: PetscCall(PetscRegisterFinalizeAll());
1636: PetscCall(PetscLogFinalize());
1638: /*
1639: Print PetscFunctionLists that have not been properly freed
1640: */
1641: if (PetscPrintFunctionList) PetscCall(PetscFunctionListPrintAll());
1643: if (petsc_history) {
1644: PetscCall(PetscCloseHistoryFile(&petsc_history));
1645: petsc_history = NULL;
1646: }
1647: PetscCall(PetscOptionsHelpPrintedDestroy(&PetscOptionsHelpPrintedSingleton));
1648: PetscCall(PetscInfoDestroy());
1650: #if !defined(PETSC_HAVE_THREADSAFETY)
1651: if (!(PETSC_RUNNING_ON_VALGRIND)) {
1652: char fname[PETSC_MAX_PATH_LEN];
1653: char sname[PETSC_MAX_PATH_LEN];
1654: FILE *fd;
1655: int err;
1657: flg2 = PETSC_FALSE;
1658: flg3 = PETSC_FALSE;
1659: if (PetscDefined(USE_DEBUG)) PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_test", &flg2, NULL));
1660: PetscCall(PetscOptionsGetBool(NULL, NULL, "-malloc_debug", &flg3, NULL));
1661: fname[0] = 0;
1662: PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_dump", fname, sizeof(fname), &flg1));
1663: if (flg1 && fname[0]) {
1664: PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
1665: fd = fopen(sname, "w");
1666: PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
1667: PetscCall(PetscMallocDump(fd));
1668: err = fclose(fd);
1669: PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
1670: } else if (flg1 || flg2 || flg3) {
1671: MPI_Comm local_comm;
1673: PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1674: PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1675: PetscCall(PetscMallocDump(stdout));
1676: PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1677: PetscCallMPI(MPI_Comm_free(&local_comm));
1678: }
1679: fname[0] = 0;
1680: PetscCall(PetscOptionsGetString(NULL, NULL, "-malloc_view", fname, sizeof(fname), &flg1));
1681: if (flg1 && fname[0]) {
1682: PetscCall(PetscSNPrintf(sname, sizeof(sname), "%s_%d", fname, rank));
1683: fd = fopen(sname, "w");
1684: PetscCheck(fd, PETSC_COMM_SELF, PETSC_ERR_FILE_OPEN, "Cannot open log file: %s", sname);
1685: PetscCall(PetscMallocView(fd));
1686: err = fclose(fd);
1687: PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fclose() failed on file");
1688: } else if (flg1) {
1689: MPI_Comm local_comm;
1691: PetscCallMPI(MPI_Comm_dup(PETSC_COMM_WORLD, &local_comm));
1692: PetscCall(PetscSequentialPhaseBegin_Private(local_comm, 1));
1693: PetscCall(PetscMallocView(stdout));
1694: PetscCall(PetscSequentialPhaseEnd_Private(local_comm, 1));
1695: PetscCallMPI(MPI_Comm_free(&local_comm));
1696: }
1697: }
1698: #endif
1700: /*
1701: Close any open dynamic libraries
1702: */
1703: PetscCall(PetscFinalize_DynamicLibraries());
1705: /* Can be destroyed only after all the options are used */
1706: PetscCall(PetscOptionsDestroyDefault());
1708: #if defined(PETSC_HAVE_NVSHMEM)
1709: if (PetscBeganNvshmem) {
1710: PetscCall(PetscNvshmemFinalize());
1711: PetscBeganNvshmem = PETSC_FALSE;
1712: }
1713: #endif
1715: PetscCall(PetscFreeMPIResources());
1717: /*
1718: Destroy any known inner MPI_Comm's and attributes pointing to them
1719: Note this will not destroy any new communicators the user has created.
1721: If all PETSc objects were not destroyed those left over objects will have hanging references to
1722: the MPI_Comms that were freed; but that is ok because those PETSc objects will never be used again
1723: */
1724: {
1725: PetscCommCounter *counter;
1726: PetscMPIInt flg;
1727: MPI_Comm icomm;
1728: union
1729: {
1730: MPI_Comm comm;
1731: void *ptr;
1732: } ucomm;
1733: PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval, &ucomm, &flg));
1734: if (flg) {
1735: icomm = ucomm.comm;
1736: PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
1737: PetscCheck(flg, PETSC_COMM_SELF, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
1739: PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_SELF, Petsc_InnerComm_keyval));
1740: PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
1741: PetscCallMPI(MPI_Comm_free(&icomm));
1742: }
1743: PetscCallMPI(MPI_Comm_get_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval, &ucomm, &flg));
1744: if (flg) {
1745: icomm = ucomm.comm;
1746: PetscCallMPI(MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg));
1747: PetscCheck(flg, PETSC_COMM_WORLD, PETSC_ERR_ARG_CORRUPT, "Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
1749: PetscCallMPI(MPI_Comm_delete_attr(PETSC_COMM_WORLD, Petsc_InnerComm_keyval));
1750: PetscCallMPI(MPI_Comm_delete_attr(icomm, Petsc_Counter_keyval));
1751: PetscCallMPI(MPI_Comm_free(&icomm));
1752: }
1753: }
1755: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Counter_keyval));
1756: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_InnerComm_keyval));
1757: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_OuterComm_keyval));
1758: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_ShmComm_keyval));
1759: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_CreationIdx_keyval));
1760: PetscCallMPI(MPI_Comm_free_keyval(&Petsc_Garbage_HMap_keyval));
1762: // Free keyvals which may be silently created by some routines
1763: if (Petsc_SharedWD_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedWD_keyval));
1764: if (Petsc_SharedTmp_keyval != MPI_KEYVAL_INVALID) PetscCallMPI(MPI_Comm_free_keyval(&Petsc_SharedTmp_keyval));
1766: PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockOpen));
1767: PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStdout));
1768: PetscCall(PetscSpinlockDestroy(&PetscViewerASCIISpinLockStderr));
1769: PetscCall(PetscSpinlockDestroy(&PetscCommSpinLock));
1771: if (PetscBeganMPI) {
1772: PetscMPIInt flag;
1773: PetscCallMPI(MPI_Finalized(&flag));
1774: PetscCheck(!flag, PETSC_COMM_SELF, PETSC_ERR_LIB, "MPI_Finalize() has already been called, even though MPI_Init() was called by PetscInitialize()");
1775: /* wait until the very last moment to disable error handling */
1776: PetscErrorHandlingInitialized = PETSC_FALSE;
1777: PetscCallMPI(MPI_Finalize());
1778: } else PetscErrorHandlingInitialized = PETSC_FALSE;
1780: /*
1782: Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
1783: the communicator has some outstanding requests on it. Specifically if the
1784: flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for IBM MPI implementation). See
1785: src/vec/utils/vpscat.c. Due to this the memory allocated in PetscCommDuplicate()
1786: is never freed as it should be. Thus one may obtain messages of the form
1787: [ 1] 8 bytes PetscCommDuplicate() line 645 in src/sys/mpiu.c indicating the
1788: memory was not freed.
1790: */
1791: PetscCall(PetscMallocClear());
1792: PetscCall(PetscStackReset());
1794: PetscInitializeCalled = PETSC_FALSE;
1795: PetscFinalizeCalled = PETSC_TRUE;
1796: #if defined(PETSC_USE_COVERAGE)
1797: /*
1798: flush gcov, otherwise during CI the flushing continues into the next pipeline resulting in git not being able to delete directories since the
1799: gcov files are still being added to the directories as git tries to remove the directories.
1800: */
1801: __gcov_flush();
1802: #endif
1803: /* To match PetscFunctionBegin() at the beginning of this function */
1804: PetscStackClearTop;
1805: return PETSC_SUCCESS;
1806: }
1808: #if defined(PETSC_MISSING_LAPACK_lsame_)
1809: PETSC_EXTERN int lsame_(char *a, char *b)
1810: {
1811: if (*a == *b) return 1;
1812: if (*a + 32 == *b) return 1;
1813: if (*a - 32 == *b) return 1;
1814: return 0;
1815: }
1816: #endif
1818: #if defined(PETSC_MISSING_LAPACK_lsame)
1819: PETSC_EXTERN int lsame(char *a, char *b)
1820: {
1821: if (*a == *b) return 1;
1822: if (*a + 32 == *b) return 1;
1823: if (*a - 32 == *b) return 1;
1824: return 0;
1825: }
1826: #endif
1828: static inline PetscMPIInt MPIU_Allreduce_Count(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1829: {
1830: PetscMPIInt err;
1831: #if !defined(PETSC_HAVE_MPI_LARGE_COUNT)
1832: PetscMPIInt count2;
1834: PetscMPIIntCast_Internal(count, &count2);
1835: err = MPI_Allreduce((void *)inbuf, outbuf, count2, dtype, op, comm);
1836: #else
1837: err = MPI_Allreduce_c((void *)inbuf, outbuf, count, dtype, op, comm);
1838: #endif
1839: return err;
1840: }
1842: /*
1843: When count is 1 and dtype == MPIU_INT performs the reduction in PetscInt64 to check for integer overflow
1844: */
1845: PetscMPIInt MPIU_Allreduce_Private(const void *inbuf, void *outbuf, MPIU_Count count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
1846: {
1847: PetscMPIInt err;
1848: if (!PetscDefined(USE_64BIT_INDICES) && count == 1 && dtype == MPIU_INT && (op == MPI_SUM || op == MPI_PROD)) {
1849: PetscInt64 incnt, outcnt;
1850: void *inbufd, *outbufd;
1852: if (inbuf != MPI_IN_PLACE) {
1853: incnt = *(PetscInt32 *)inbuf;
1854: inbufd = &incnt;
1855: } else {
1856: outcnt = *(PetscInt32 *)outbuf;
1857: inbufd = (void *)MPI_IN_PLACE;
1858: }
1859: outbufd = &outcnt;
1860: err = MPIU_Allreduce_Count(inbufd, outbufd, count, MPIU_INT64, op, comm);
1861: if (!err && outcnt > PETSC_INT_MAX) err = MPI_ERR_OTHER;
1862: *(PetscInt32 *)outbuf = (PetscInt32)outcnt;
1863: } else {
1864: err = MPIU_Allreduce_Count(inbuf, outbuf, count, dtype, op, comm);
1865: }
1866: return err;
1867: }